/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using a
 * number of search trees. These trees are organised by size, by address and
 * in most recent eviction order. This allows the user to find either the
 * smallest hole to reuse, the lowest or highest address to reuse, or simply
 * reuse the most recent eviction that fits. When allocating the &drm_mm_node
 * from within the hole, the &drm_mm_insert_mode also dictates whether to
 * allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,

	/**
	 * @DRM_MM_INSERT_ONCE:
	 *
	 * Only check the first hole for suitability and report -ENOSPC
	 * immediately otherwise, rather than check every hole until a
	 * suitable one is found. Can only be used in conjunction with another
	 * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
	 */
	DRM_MM_INSERT_ONCE = BIT(31),

	/**
	 * @DRM_MM_INSERT_HIGHEST:
	 *
	 * Only check the highest hole (the hole with the largest address) and
	 * insert the node at the top of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,

	/**
	 * @DRM_MM_INSERT_LOWEST:
	 *
	 * Only check the lowest hole (the hole with the smallest address) and
	 * insert the node at the bottom of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
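
/*
 * Example (illustrative sketch, not part of the API): the mode is passed to
 * the insertion functions declared below. A driver placing a buffer at the
 * highest available address of a hypothetical "ggtt" range manager might do:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node_in_range(&ggtt, &node, size, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_HIGH);
 *	if (err == -ENOSPC)
 *		return err; // no hole large enough; consider an eviction scan
 */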

/**
 * struct drm_mm_node - allocated block in the DRM allocator
 *
 * This represents an allocated block in a &drm_mm allocator. Except for
 * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
 * entirely opaque and should only be accessed through the provided functions.
 * Since allocation of these nodes is entirely handled by the driver, they can
 * be embedded.
 */
struct drm_mm_node {
	/** @color: Opaque driver-private tag. */
	unsigned long color;
	/** @start: Start address of the allocated block. */
	u64 start;
	/** @size: Size of the allocated block. */
	u64 size;
	/* private: */
	struct drm_mm *mm;
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	struct rb_node rb_hole_size;
	struct rb_node rb_hole_addr;
	u64 __subtree_last;
	u64 hole_size;
	u64 subtree_max_hole;
	unsigned long flags;
#define DRM_MM_NODE_ALLOCATED_BIT	0
#define DRM_MM_NODE_SCANNED_BIT		1
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};
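
/*
 * Example (hypothetical driver structure): as the node is driver-allocated it
 * is typically embedded in the driver's own object, and it must be zeroed
 * before first use, e.g. via kzalloc():
 *
 *	struct my_buffer {
 *		struct drm_mm_node vma;
 *		void *vaddr;
 *	};
 *
 *	struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 */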

/**
 * struct drm_mm - DRM allocator
 *
 * DRM range allocator with a few special functions and features geared towards
 * managing GPU memory. Except for the @color_adjust callback the structure is
 * entirely opaque and should only be accessed through the provided functions
 * and macros. This structure can be embedded into larger driver structures.
 */
struct drm_mm {
	/**
	 * @color_adjust:
	 *
	 * Optional driver callback to further apply restrictions on a hole.
	 * The node argument points at the node containing the hole from which
	 * the block would be allocated (see drm_mm_hole_follows() and
	 * friends). The color argument is the opaque tag of the block to be
	 * allocated. The driver can adjust the start and end as needed, e.g.
	 * to insert guard pages.
	 */
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root_cached interval_tree;
	struct rb_root_cached holes_size;
	struct rb_root holes_addr;

	unsigned long scan_active;
};
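
/*
 * Example (sketch of a possible @color_adjust hook, not mandated by the API):
 * keep a guard page between the hole's preceding neighbour and any block
 * about to be allocated with a different tag:
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		if (node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 *
 *	mm.color_adjust = my_color_adjust;
 */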

/**
 * struct drm_mm_scan - DRM allocator eviction roster data
 *
 * This structure tracks data needed for the eviction roster set up using
 * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
 * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
 * be accessed through the provided functions and macros. It is meant to be
 * allocated temporarily by the driver on the stack.
 */
struct drm_mm_scan {
	/* private: */
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	enum drm_mm_insert_mode mode;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
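
/*
 * Example (common cleanup idiom, assuming the node is embedded in a driver
 * object that may never have been inserted):
 *
 *	if (drm_mm_node_allocated(&buf->vma))
 *		drm_mm_remove_node(&buf->vma);
 */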

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return READ_ONCE(mm->hole_stack.next);
}
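
/*
 * Example (sketch of lazy initialisation, assuming the containing structure
 * was zero-initialised and "priv" is a hypothetical driver object):
 *
 *	if (!drm_mm_initialized(&priv->mm))
 *		drm_mm_init(&priv->mm, 0, priv->size);
 */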

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function. See also drm_mm_hole_node_start() and
 * drm_mm_hole_node_end().
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_size;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @hole_node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @next: &struct drm_mm_node to store the next step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
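
/*
 * Example (typical teardown sketch): remove any nodes still allocated before
 * the final drm_mm_takedown():
 *
 *	struct drm_mm_node *node, *next;
 *
 *	drm_mm_for_each_node_safe(node, next, &mm)
 *		drm_mm_remove_node(node);
 *	drm_mm_takedown(&mm);
 */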

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @pos: &drm_mm_node used internally to track progress
 * @mm: &drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @pos is
 * used internally and will not reflect a real drm_mm_node for the very first
 * hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 */
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
	for (pos = list_first_entry(&(mm)->hole_stack, \
				    typeof(*pos), hole_stack); \
	     &pos->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(pos), \
	     hole_end = hole_start + pos->hole_size, \
	     1 : 0; \
	     pos = list_next_entry(pos, hole_stack))
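
/*
 * Example (debug sketch): dump all holes, e.g. from a driver's debugfs
 * handler:
 *
 *	struct drm_mm_node *pos;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(pos, &mm, hole_start, hole_end)
 *		pr_info("hole: [%llx, %llx)\n", hole_start, hole_end);
 */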

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range(struct drm_mm *mm,
				struct drm_mm_node *node,
				u64 size,
				u64 alignment,
				unsigned long color,
				u64 start,
				u64 end,
				enum drm_mm_insert_mode mode);

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_insert_node_in_range() with no
 * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_insert_mode mode)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size, alignment, color,
					   0, U64_MAX, mode);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
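
/*
 * Example (minimal lifecycle sketch; the 256 MiB range and 4 KiB block are
 * arbitrary): initialise a range manager, allocate one aligned block from it
 * and tear everything down again:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, SZ_256M);
 *	err = drm_mm_insert_node_generic(&mm, &node, SZ_4K, SZ_4K, 0,
 *					 DRM_MM_INSERT_BEST);
 *	if (!err) {
 *		// the block now spans [node.start, node.start + node.size)
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */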

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
 * over the special _unallocated_ &drm_mm.head_node, and may even continue
 * indefinitely.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__->start < (end__);					\
	     node__ = list_next_entry(node__, node_list))
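
/*
 * Example (sketch): log every node overlapping a given range, with @start and
 * @end assumed to lie within the initialised range:
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, &mm, start, end)
 *		pr_debug("overlap: [%llx + %llx]\n",
 *			 node->start, node->size);
 */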

void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 enum drm_mm_insert_mode mode);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_scan_init_with_range() with no range
 * restrictions applied.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    enum drm_mm_insert_mode mode)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX, mode);
}
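
/*
 * Example (condensed sketch of the eviction-scan flow; the LRU list, the
 * struct my_buffer links and the surrounding locking are hypothetical):
 *
 *	struct drm_mm_scan scan;
 *	struct my_buffer *buf, *next;
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init(&scan, &mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(buf, &lru, lru_link) {
 *		list_add(&buf->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &buf->vma)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// every scanned block must be removed from the scan again, in
 *	// reverse order (list_add() prepends, so walking evict_list
 *	// forward does that); blocks for which this returns true lie
 *	// inside the chosen hole and must be evicted to create it
 *	list_for_each_entry_safe(buf, next, &evict_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, &buf->vma))
 *			drm_mm_remove_node(&buf->vma);
 *		list_del(&buf->evict_link);
 *	}
 *
 *	if (found)
 *		err = drm_mm_insert_node_generic(&mm, &node, size, 0, 0,
 *						 DRM_MM_INSERT_EVICT);
 */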

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);

void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);

#endif