ioremap code analysis
2021. 9. 27. 13:19ㆍLinux
At boot time, a resource describing each struct memblock_region is added to iomem_resource (a sketch of this registration follows the list below):

1) System RAM region request
Each memblock memory region is registered under iomem_resource as a "System RAM" resource, and request_resource() is called for the page frames in that range. The start/end of the kernel code and data sections are taken from the linker symbols to decide the two child regions below.

2) Kernel code section
request_resource(res, &kernel_code);

3) Kernel data section
request_resource(res, &kernel_data);
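A minimal sketch of this registration, modeled on the arm64 request_standard_resources() in older kernels (the exact helpers, flags, and allocation API differ between kernel versions):

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	/* kernel code / data section boundaries come from the linker symbols */
	kernel_code.start = __pa_symbol(_text);
	kernel_code.end   = __pa_symbol(__init_begin - 1);
	kernel_data.start = __pa_symbol(_sdata);
	kernel_data.end   = __pa_symbol(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end   = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		/* 1) register the RAM region itself under iomem_resource */
		request_resource(&iomem_resource, res);

		/* 2), 3) register kernel code/data as children of that region */
		if (kernel_code.start >= res->start && kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start && kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}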
What ioremap does is allocate a range of virtual addresses (pages) and fill in page table entries so that the range maps onto the page frames at the requested physical address.
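From a driver's point of view, typical usage looks like the sketch below; the register base address, size, and offsets are hypothetical:

#include <linux/io.h>

#define MY_DEV_PHYS_BASE 0x10000000UL	/* hypothetical MMIO base */
#define MY_DEV_REG_SIZE  0x1000

static void __iomem *regs;

static int my_dev_init(void)
{
	/* map the device's physical register window into kernel virtual space */
	regs = ioremap(MY_DEV_PHYS_BASE, MY_DEV_REG_SIZE);
	if (!regs)
		return -ENOMEM;

	/* access the registers through the returned virtual address */
	writel(0x1, regs + 0x0);
	pr_info("status=%#x\n", readl(regs + 0x4));
	return 0;
}

static void my_dev_exit(void)
{
	iounmap(regs);	/* release the virtual mapping */
}

The rest of this post follows what happens inside the kernel, starting from the arm64 definition of the macro itself.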
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) // ioremap_cache

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot, __builtin_return_address(0));
}
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, pgprot_t prot, void *caller)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;	/* offset of phys_addr within its page */

	phys_addr &= PAGE_MASK;				/* align the address to a page boundary */
	size = PAGE_ALIGN(size + offset);

	/* pfn_valid() checks whether the physical address falls on a page frame that exists
	 * in real memory, i.e. whether that page frame is covered by a memblock region
	 * (ioremap refuses to map page frames that are actual RAM) */
	pfn_valid(__phys_to_pfn(phys_addr));

	/* allocate virtual memory (the vmalloc implementation) */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	addr = (unsigned long)area->addr;

	/* map the virtual address onto the physical address:
	 * page upper directory -> page mid-level directory -> page table -> page (4KB),
	 * walked top-down, mapping page by page */
	ioremap_page_range(addr, addr + size, phys_addr, prot);
}

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	do {
		/* write the physical pfn into the pte of the virtual address,
		 * using the page table allocated by pte_alloc_kernel */
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	...
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	/* the virtual address of the allocated page is used as the pte_t array (page table) */
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)	/* order is 0 */
{
	struct page *page = alloc_pages(gfp_mask, order);
	/* page_address() (highmem.c) returns the page's virtual address (kernel virtual address) */
	return (unsigned long)page_address(page);
}

static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}

struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct page *page;

	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
				policy_zonelist(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));
}

static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid)
{
	page = __alloc_pages(gfp, order, zl);
}

static inline struct page *__alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

struct page *__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, nodemask_t *nodemask)
{
	struct page *page = NULL;

	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, nodemask, order, ...);
	...
}

static struct page *get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct page *page = NULL;

	page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask, migratetype);
	...
}

static inline struct page *buffered_rmqueue(struct zone *preferred_zone, struct zone *zone,
		int order, gfp_t gfp_flags, int migratetype)
{
	struct page *page;

	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		...
	} else {
		page = __rmqueue(zone, order, migratetype);
	}
}

static struct page *__rmqueue(struct zone *zone, unsigned int order, int migratetype)	/* from the buddy allocator */
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);
}

static inline struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype)
{
	struct free_area *area;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {	/* order is 0 */
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;
		page = list_entry(area->free_list[migratetype].next, struct page, lru);
		list_del(&page->lru);
		area->nr_free--;
		...
	}
}
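For reference, the top of the page-table walk that __ioremap_caller kicks off looks roughly like this in older kernels' lib/ioremap.c; it steps through the page global directory and delegates each range down through the pud/pmd/pte helpers, ending in ioremap_pte_range() above (a sketch, details vary by kernel version):

int ioremap_page_range(unsigned long addr, unsigned long end,
		       phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start = addr;
	unsigned long next;
	int err;

	phys_addr -= addr;			/* so that phys_addr + addr yields the target physical address */
	pgd = pgd_offset_k(addr);		/* PGD entry for this kernel virtual address */
	do {
		next = pgd_addr_end(addr, end);
		/* descend: pud -> pmd -> pte */
		err = ioremap_pud_range(pgd, addr, next, phys_addr + addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);
	return err;
}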
A single zone contains free_area entries (11 of them), and each free_area holds pages as lists.
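The data structures behind that statement, trimmed to the relevant fields (as declared in include/linux/mmzone.h; field layout varies by version):

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];	/* free blocks of this order, one list per migrate type */
	unsigned long		nr_free;			/* number of free blocks in this area */
};

struct zone {
	...
	/* one free_area per buddy order, 2^0 ... 2^(MAX_ORDER-1) pages; MAX_ORDER is 11 by default */
	struct free_area	free_area[MAX_ORDER];
	...
};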
The pfn_valid() check in __ioremap_caller ultimately comes down to a binary search over the memblock regions:

/* a memblock_type holds a sorted array of memblock_region */
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base + type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
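The callers sit just above it; a sketch of the chain from pfn_valid() down to memblock_search(), modeled on arm64 kernels of roughly this vintage (mm/memblock.c and arch/arm64/mm/init.c; newer kernels use a different pfn_valid implementation):

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	/* non-zero if addr falls inside any registered memory region */
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
#endif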