Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};

#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];

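/*
 * IPI handlers: run on every other CPU via smp_call_function(). A CPU
 * whose PAL call fails increments uc_pool->status, so the initiating
 * CPU can detect that the operation failed somewhere.
 */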
static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = __alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				IA64_GRANULE_SHIFT - PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about.
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT - PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0xFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (efi_memmap_walk_uc() passes NULL)
 *
 * Called at boot time to build a map of pages that can be used for
 * special memory operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


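/*
 * uncached_init
 *
 * Called at boot time to create a gen_pool (and the mutex guarding
 * chunk conversion) for each online node, then walk the EFI memmap to
 * seed the pools with any preexisting uncached granules.
 */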
static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);
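
For context, the file above is arch/ia64/kernel/uncached.c, and its two exported entry points are uncached_alloc_page() and uncached_free_page(). Below is a minimal sketch of how kernel code might call them; the demo module, its names, and the two-page allocation size are hypothetical, while the prototypes are taken from the definitions above.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* prototypes as defined in arch/ia64/kernel/uncached.c above */
extern unsigned long uncached_alloc_page(int starting_nid, int n_pages);
extern void uncached_free_page(unsigned long uc_addr, int n_pages);

static unsigned long uc_buf;	/* uncached kernel address, or 0 */

static int __init uc_demo_init(void)
{
	/* two contiguous uncached pages; -1 means start on the local node */
	uc_buf = uncached_alloc_page(-1, 2);
	if (!uc_buf)
		return -ENOMEM;	/* pools empty and no granule could be converted */

	pr_info("uc_demo: uncached buffer at 0x%lx\n", uc_buf);
	return 0;
}

static void __exit uc_demo_exit(void)
{
	if (uc_buf)
		uncached_free_page(uc_buf, 2);	/* n_pages must match the alloc */
}

module_init(uc_demo_init);
module_exit(uc_demo_exit);
MODULE_LICENSE("GPL");

Note that uncached_alloc_page() returns 0 (not an ERR_PTR) on failure, and that the address it returns lies in the __IA64_UNCACHED_OFFSET identity region; uncached_free_page() panics if handed an address outside that region.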