/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to deallocate the page_pool object.  Thus, API
 * users must make sure to call page_pool_release_page() when a page
 * is "leaving" the page_pool, or call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of an elevated refcnt,
 * release the DMA mapping and in-flight state accounting.  We hope to
 * lift this requirement in the future.
 */
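/* Illustrative lifecycle sketch of the accounting rule above (hedged example,
 * not part of this header's API surface; "rxq" is a hypothetical per-queue
 * driver structure):
 *
 *	page = page_pool_dev_alloc_pages(rxq->page_pool);
 *	...	(DMA-sync-for-CPU, build the skb around the page)
 *	page_pool_release_page(rxq->page_pool, page);
 *
 * or, when the frame is dropped and the page can be reused:
 *
 *	page_pool_put_full_page(rxq->page_pool, page, false);
 */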
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
#include <linux/android_kabi.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Room is kept for more objects to support the XDP_DROP use-case, as
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection.  If
 * the cache is already full (or partly full), then XDP_DROP recycles
 * would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	void *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
};
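/* Illustrative sketch of creating a pool for one RX queue (hedged example;
 * the pool_size, device pointer and error handling are hypothetical driver
 * details, not values mandated by the API):
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,	(roughly the RX ring size)
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */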

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;

	/*
	 * Data structure for allocation side
	 *
	 * The driver's allocation side usually already performs some
	 * kind of resource protection.  Piggyback on this protection,
	 * and require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  NAPI schedule
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-wise,
	 * because frees can happen on remote CPUs, with no association
	 * with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  This
	 * refcnt serves to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;

	ANDROID_KABI_RESERVE(1);
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/* Get the stored DMA direction.  A driver might decide to treat this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}

struct page_pool *page_pool_create(const struct page_pool_params *params);

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *))
{
}
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}
#endif

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
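/* Illustrative sketch of direct recycling from the XDP_DROP verdict inside
 * the driver's NAPI poll, where the softirq context requirement holds
 * (hedged example; the surrounding verdict handling is a hypothetical
 * driver fragment):
 *
 *	switch (act) {
 *	case XDP_DROP:
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *		break;
 *	...
 *	}
 */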

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr[0];

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr[0] = addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		page->dma_addr[1] = upper_32_bits(addr);
}
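/* Illustrative sketch of the DMA-sync-for-CPU step that remains the driver's
 * responsibility before reading received data (hedged example; "len" is a
 * hypothetical frame length supplied by the driver):
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_for_cpu(pool->p.dev, dma + pool->p.offset, len,
 *				page_pool_get_dma_dir(pool));
 */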

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
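/* Illustrative sketch of keeping the pool's NUMA node in sync from the NAPI
 * poll loop (hedged example; deriving the node from the current CPU via
 * numa_mem_id() is one possible driver policy, not mandated by the API):
 *
 *	page_pool_nid_changed(rxq->page_pool, numa_mem_id());
 */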
#endif /* _NET_PAGE_POOL_H */