// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_address,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * the same as the regular physical page frame mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
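
/*
 * The __ref annotation above suppresses the section mismatch warning
 * for referencing memblock (init) code from a __meminit path; the
 * reference is safe because this helper is only reached before the
 * slab allocator is up (see vmemmap_alloc_block() below).
 */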

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
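
/*
 * vmemmap_alloc_block() serves both leaf mappings and page tables. A
 * PAGE_SIZE request is an order-0 allocation; an architecture mapping
 * the memmap with huge pages would request PMD_SIZE, e.g. 2MB on
 * x86-64, which get_order() turns into an order-9 allocation. The
 * bootmem path likewise aligns the block to its own size.
 */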

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
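
/*
 * The altmap carves the memmap out of a pre-reserved pfn range on the
 * device being hotplugged (e.g. ZONE_DEVICE pmem) instead of system
 * RAM. The range is consumed as a simple bump allocator: starting at
 * base_pfn, the first 'reserve' pfns are skipped, 'alloc' counts pfns
 * handed out and 'align' counts pfns skipped for alignment, so the
 * next free pfn is base_pfn + reserve + alloc + align, and 'free'
 * bounds how many pfns may be consumed in total.
 */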

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
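
/*
 * find_first_bit() on nr_pfns yields the lowest set bit, so the block
 * is aligned to the largest power of two that divides its own length,
 * i.e. it is naturally aligned. For example, a 2MB request with 4K
 * pages gives nr_pfns = 512 (bit 9 set), so the start pfn is rounded
 * up to a multiple of 512 and the skipped pfns are charged to
 * altmap->align.
 */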

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}
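
/*
 * The check above is purely diagnostic: when the page backing a memmap
 * pte lives on a node farther than LOCAL_DISTANCE from the node whose
 * pfns it describes, accesses to those struct pages become remote NUMA
 * accesses, so a warning is printed but the mapping is kept.
 */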

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
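
/*
 * Only this leaf level may be backed by the altmap: the page installed
 * here holds the struct pages themselves, while the page tables above
 * it always come from regular (zeroed) memory via
 * vmemmap_alloc_block_zero() below.
 */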

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
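
/*
 * Each of the five populate helpers above fills in exactly one page
 * table level if (and only if) its entry is still none, so repeated
 * calls over an already mapped range are cheap no-ops that simply
 * return the existing entry.
 */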

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
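
/*
 * An architecture that does not map the memmap with huge pages can
 * implement its vmemmap_populate() hook as a thin wrapper around the
 * function above (as e.g. arm64 does when section mappings are not
 * used):
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *			unsigned long end, int node,
 *			struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */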

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}
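
/*
 * Sparsemem hot-adds the memmap in subsection units, so both pfn and
 * nr_pages must be PAGES_PER_SUBSECTION aligned (2MB worth of pfns
 * with 4K pages); start/end span the struct page array for the range,
 * which vmemmap_populate() then maps in an arch-specific way.
 */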