/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core instead if it suits them; the upside
 * of drm_mm is that it lives in the DRM core, which makes it easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, and allocations are tracked in
 * &drm_mm_node. Drivers are free to embed either of them into their own
 * suitable data structures. drm_mm itself will not do any memory allocations
 * of its own, so if drivers choose not to embed nodes they still need to
 * allocate them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps avoid looped dependencies in
 * the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a fairly
 * steep cliff anyway it is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
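
/*
 * A minimal usage sketch (the embedding struct, names and error handling are
 * illustrative assumptions, not part of this file): a driver embeds a
 * &drm_mm_node in its buffer object, initializes the manager once, and then
 * inserts and removes nodes under its own lock:
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;
 *		u64 size;
 *	};
 *
 *	drm_mm_init(&mm, 0, size);			// manage [0, size)
 *
 *	ret = drm_mm_insert_node(&mm, &buf->node, buf->size);
 *	if (ret)					// -ENOSPC: no fitting hole
 *		return ret;
 *
 *	drm_mm_remove_node(&buf->node);			// node is reusable at once
 *	drm_mm_takedown(&mm);				// requires an empty drm_mm
 */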

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

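/*
 * Keep all allocated nodes in an augmented interval tree keyed on
 * [START(node), LAST(node)], so that __drm_mm_interval_first() below (and the
 * drm_mm_for_each_node_in_range() helper built on top of it) can look up nodes
 * overlapping a range in O(log n).
 */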
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

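/*
 * Best-fit lookup: walk the size-ordered tree (larger holes to the left),
 * remembering the smallest hole seen so far that still fits @size. This takes
 * O(log num_holes) instead of scanning every hole.
 */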
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */

#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
									\
	if (!entry || RB_EMPTY_NODE(node))				\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

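/*
 * next_hole_high_addr() steps to the previous hole by address (descending, for
 * top-down allocation) and next_hole_low_addr() to the next one (ascending,
 * for bottom-up); usable_hole_addr() prunes whole subtrees whose
 * subtree_max_hole is already too small for the requested size.
 */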
DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
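 *
 * A minimal sketch (fb_base and fb_size are illustrative placeholders, not
 * taken from this file):
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *	node->start = fb_base;
 *	node->size = fb_size;
 *	err = drm_mm_reserve_node(mm, node);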
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
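 *
 * A minimal sketch (the 1 MiB size, 4 KiB alignment, and full range are
 * arbitrary illustrative values):
 *
 *	err = drm_mm_insert_node_in_range(mm, node, SZ_1M, SZ_4K, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);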
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
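 *
 * A minimal sketch: after the call @new owns @old's range and @old is marked
 * unallocated, so only @new may later be passed to drm_mm_remove_node():
 *
 *	drm_mm_replace_node(old, new);
 *	drm_mm_remove_node(new);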
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply start selecting objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before a
 * scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
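 *
 * A rough sketch of the protocol (the LRU list walk, the @obj layout and
 * evict() are driver-side assumptions, not part of drm_mm):
 *
 *	drm_mm_scan_init(&scan, mm, size, alignment, color, mode);
 *	list_for_each_entry(obj, &lru, lru_link)
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *
 *	(then, assuming the scan stopped at @obj, walk back in reverse order)
 *
 *	list_for_each_entry_from_reverse(obj, &lru, lru_link)
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			evict(obj);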
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * @scan: scan state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * @mm: drm_mm to scan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * @size: size of the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * @alignment: alignment of the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * @color: opaque tag value to use for the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * @start: start of the allowed range for the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * @end: end of the allowed range for the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * @mode: fine-tune the allocation search and placement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * This simply sets up the scanning routines with the parameters for the desired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * Warning:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * As long as the scan list is non-empty, no other operations than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * adding/removing nodes to/from the scan list are allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) struct drm_mm *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) u64 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) u64 alignment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) unsigned long color,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) u64 start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) u64 end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) enum drm_mm_insert_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) DRM_MM_BUG_ON(start >= end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) DRM_MM_BUG_ON(!size || size > end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) DRM_MM_BUG_ON(mm->scan_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) scan->mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (alignment <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) alignment = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) scan->color = color;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) scan->alignment = alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) scan->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) scan->mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) DRM_MM_BUG_ON(end <= start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) scan->range_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) scan->range_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) scan->hit_start = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) scan->hit_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) EXPORT_SYMBOL(drm_mm_scan_init_with_range);
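
/*
 * For example (all values purely illustrative, mm being the driver's
 * allocator), a scan for a 1 MiB hole aligned to 64 KiB somewhere in the
 * low 4 GiB could be set up as:
 *
 *	struct drm_mm_scan scan;
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, SZ_1M, SZ_64K, 0,
 *				    0, SZ_4G, DRM_MM_INSERT_LOW);
 *
 * For an unrestricted scan, drm_mm_scan_init() wraps this function with the
 * full 0 to U64_MAX range.
 */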
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * drm_mm_scan_add_block - add a node to the scan list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * @scan: the active drm_mm scanner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * @node: drm_mm_node to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * Add a node to the scan list that might be freed to make space for the desired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * True if a hole has been found, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) struct drm_mm_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct drm_mm *mm = scan->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct drm_mm_node *hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) u64 hole_start, hole_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) u64 col_start, col_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) u64 adj_start, adj_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) DRM_MM_BUG_ON(node->mm != mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) mm->scan_active++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* Remove this block from the node_list so that we enlarge the hole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * (distance between the end of our previous node and the start of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * our next), without poisoning the link so that we can restore it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * later in drm_mm_scan_remove_block().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) hole = list_prev_entry(node, node_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) __list_del_entry(&node->node_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) hole_start = __drm_mm_hole_node_start(hole);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) hole_end = __drm_mm_hole_node_end(hole);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) col_start = hole_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) col_end = hole_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (mm->color_adjust)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) mm->color_adjust(hole, scan->color, &col_start, &col_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) adj_start = max(col_start, scan->range_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) adj_end = min(col_end, scan->range_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (adj_end <= adj_start || adj_end - adj_start < scan->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (scan->mode == DRM_MM_INSERT_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) adj_start = adj_end - scan->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
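/*
 * Apply the alignment: for top-down (DRM_MM_INSERT_HIGH) scans round
 * adj_start down so the block stays flush against adj_end, otherwise
 * round it up to the next aligned address inside the hole.
 */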
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (scan->alignment) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u64 rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (likely(scan->remainder_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) rem = adj_start & scan->remainder_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) div64_u64_rem(adj_start, scan->alignment, &rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) adj_start -= rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (scan->mode != DRM_MM_INSERT_HIGH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) adj_start += scan->alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (adj_start < max(col_start, scan->range_start) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) min(col_end, scan->range_end) - adj_start < scan->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (adj_end <= adj_start ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) adj_end - adj_start < scan->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) scan->hit_start = adj_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) scan->hit_end = adj_start + scan->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) DRM_MM_BUG_ON(scan->hit_start < hole_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) DRM_MM_BUG_ON(scan->hit_end > hole_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) EXPORT_SYMBOL(drm_mm_scan_add_block);
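
/*
 * If the scan exhausts the LRU without drm_mm_scan_add_block() ever
 * returning true, the caller must still take every node off the scan list
 * again (in reverse-add order) before bailing out, e.g. (continuing the
 * hypothetical sketch above, next being a second struct evict_buf pointer):
 *
 *	if (!found) {
 *		list_for_each_entry_safe(buf, next, &evict_list, scan_link) {
 *			drm_mm_scan_remove_block(&scan, &buf->node);
 *			list_del(&buf->scan_link);
 *		}
 *		return -ENOSPC;
 *	}
 */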
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * drm_mm_scan_remove_block - remove a node from the scan list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * @scan: the active drm_mm scanner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * @node: drm_mm_node to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * Nodes **must** be removed from the scan list in exactly the reverse order in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * which they were added (e.g. using list_add() as they are added and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * list_for_each() over that eviction list to remove), otherwise the internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * state of the memory manager will be corrupted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * When the scan list is empty, the selected memory nodes can be freed. An
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * immediately following drm_mm_insert_node_in_range() with the same size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * alignment, color and range and with DRM_MM_INSERT_EVICT will then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * return the just freed block (it sits at the top of the hole_stack list).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * True if this block should be evicted, false otherwise. Will always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * return false when no hole has been found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct drm_mm_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct drm_mm_node *prev_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) DRM_MM_BUG_ON(node->mm != scan->mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) DRM_MM_BUG_ON(!node->mm->scan_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) node->mm->scan_active--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* During drm_mm_scan_add_block() we decoupled this node leaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * its pointers intact. Now that the caller is walking back along
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * the eviction list we can restore this block into its rightful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * place on the full node_list. To confirm that the caller is walking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * backwards correctly we check that prev_node->next == node->next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * i.e. both believe the same node should be on the other side of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) prev_node = list_prev_entry(node, node_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) list_next_entry(node, node_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) list_add(&node->node_list, &prev_node->node_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return (node->start + node->size > scan->hit_start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) node->start < scan->hit_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) EXPORT_SYMBOL(drm_mm_scan_remove_block);
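
/*
 * Continuing the hypothetical sketch from above: walking the eviction list
 * forwards visits the nodes in reverse-add order, since they were prepended
 * with list_add(). The scan is unwound in a first pass that only decides
 * which nodes get evicted; the hits are removed from the manager only once
 * the scan list is empty again, honouring the warning on
 * drm_mm_scan_init_with_range(). driver_evict_buf() stands in for whatever
 * unbinding the driver needs to do.
 *
 *	struct evict_buf *buf, *next;
 *
 *	list_for_each_entry_safe(buf, next, &evict_list, scan_link)
 *		if (!drm_mm_scan_remove_block(&scan, &buf->node))
 *			list_del(&buf->scan_link);
 *
 *	list_for_each_entry_safe(buf, next, &evict_list, scan_link) {
 *		list_del(&buf->scan_link);
 *		driver_evict_buf(buf);
 *		drm_mm_remove_node(&buf->node);
 *	}
 *
 * A following drm_mm_insert_node_in_range() with the same parameters as the
 * scan will then land in the freshly created hole.
 */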
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * @scan: drm_mm scan with target hole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * After completing an eviction scan and removing the selected nodes, we may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * need to remove a few more nodes from either side of the target hole if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * mm.color_adjust is being used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * A node to evict, or NULL if there are no overlapping nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct drm_mm *mm = scan->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct drm_mm_node *hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) u64 hole_start, hole_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!mm->color_adjust)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * The hole found during scanning should ideally be the first element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * in the hole_stack list, but due to side-effects in the driver it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * may not be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) hole_start = __drm_mm_hole_node_start(hole);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) hole_end = hole_start + hole->hole_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (hole_start <= scan->hit_start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) hole_end >= scan->hit_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* We should only be called after we found the hole previously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (unlikely(&hole->hole_stack == &mm->hole_stack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) DRM_MM_BUG_ON(hole_start > scan->hit_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) DRM_MM_BUG_ON(hole_end < scan->hit_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (hole_start > scan->hit_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (hole_end < scan->hit_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return list_next_entry(hole, node_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) EXPORT_SYMBOL(drm_mm_scan_color_evict);
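
/*
 * A minimal usage sketch, reusing the hypothetical evict_buf wrapper from
 * above: after the eviction list has been processed, keep asking for
 * conflicting neighbours until the hole is clean:
 *
 *	struct drm_mm_node *node;
 *
 *	while ((node = drm_mm_scan_color_evict(&scan))) {
 *		struct evict_buf *buf =
 *			container_of(node, struct evict_buf, node);
 *
 *		driver_evict_buf(buf);
 *		drm_mm_remove_node(node);
 *	}
 */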
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * drm_mm_init - initialize a drm-mm allocator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * @mm: the drm_mm structure to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * @start: start of the range managed by @mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * @size: size of the range managed by @mm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Note that @mm must be cleared to 0 before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) DRM_MM_BUG_ON(start + size <= start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) mm->color_adjust = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) INIT_LIST_HEAD(&mm->hole_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) mm->interval_tree = RB_ROOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) mm->holes_size = RB_ROOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) mm->holes_addr = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* Clever trick to avoid a special case in the free hole tracking:
 * the fake head node gets a start of start + size and a size of -size,
 * so its end wraps around to start and the single hole trailing it
 * covers the whole managed range [start, start + size).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) INIT_LIST_HEAD(&mm->head_node.node_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) mm->head_node.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) mm->head_node.mm = mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) mm->head_node.start = start + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) mm->head_node.size = -size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) add_hole(&mm->head_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) mm->scan_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) EXPORT_SYMBOL(drm_mm_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * drm_mm_takedown - clean up a drm_mm allocator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * @mm: drm_mm allocator to clean up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * Note that it is a bug to call this function on an allocator which is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) void drm_mm_takedown(struct drm_mm *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (WARN(!drm_mm_clean(mm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) "Memory manager not clean during takedown.\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) show_leaks(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) EXPORT_SYMBOL(drm_mm_takedown);
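
/*
 * Lifecycle sketch: the zeroed-storage requirement of drm_mm_init() is
 * usually met by embedding the struct drm_mm in a kzalloc()'d driver
 * structure; with on-stack or static storage, clear it explicitly
 * (SZ_64M here is an arbitrary example range):
 *
 *	struct drm_mm mm;
 *
 *	memset(&mm, 0, sizeof(mm));
 *	drm_mm_init(&mm, 0, SZ_64M);
 *
 *	... insert and remove nodes ...
 *
 *	drm_mm_takedown(&mm);
 *
 * All nodes must have been removed before the final drm_mm_takedown().
 */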
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u64 start, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) size = entry->hole_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) start = drm_mm_hole_node_start(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) start, start + size, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * drm_mm_print - print allocator state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * @mm: drm_mm allocator to print
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * @p: DRM printer to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) const struct drm_mm_node *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) u64 total_used = 0, total_free = 0, total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) total_free += drm_mm_dump_hole(p, &mm->head_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) drm_mm_for_each_node(entry, mm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) entry->start + entry->size, entry->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) total_used += entry->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) total_free += drm_mm_dump_hole(p, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) total = total_free + total_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) drm_printf(p, "total: %llu, used %llu free %llu\n", total,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) total_used, total_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) EXPORT_SYMBOL(drm_mm_print);
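
/*
 * For example (sketch), a driver debugfs show callback holding a
 * struct seq_file *m could dump the allocator state via:
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *
 *	drm_mm_print(&mm, &p);
 */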