// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"

struct devres_node {
	struct list_head	entry;
	dr_release_t		release;
#ifdef CONFIG_DEBUG_DEVRES
	const char		*name;
	size_t			size;
#endif
};

struct devres {
	struct devres_node	node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node	node[2];
	void			*id;
	int			color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			   const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resources from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
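
/*
 * Illustrative sketch only (not part of this file): walk every managed
 * record that was registered with a given release function, e.g. to dump
 * diagnostic state. Note that @fn runs under dev->devres_lock with
 * interrupts disabled, so it must not sleep. "foo_devres_release" and
 * "foo_dump_one" are hypothetical names.
 *
 *	static void foo_dump_one(struct device *dev, void *res, void *data)
 *	{
 *		dev_info(dev, "managed record %p\n", res);
 *	}
 *
 *	devres_for_each_res(dev, foo_devres_release, NULL, NULL,
 *			    foo_dump_one, NULL);
 */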

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
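
/*
 * Illustrative sketch only (not part of this file): the usual pattern is
 * to allocate a small record with devres_alloc(), acquire the underlying
 * resource, and only then register the record with devres_add(), after
 * which the release callback runs automatically on driver detach. All
 * "foo" names below are hypothetical.
 *
 *	struct foo_devres {
 *		struct foo_hw *hw;
 *	};
 *
 *	static void foo_devres_release(struct device *dev, void *res)
 *	{
 *		struct foo_devres *this = res;
 *
 *		foo_hw_shutdown(this->hw);
 *	}
 *
 *	static int foo_acquire(struct device *dev, struct foo_hw *hw)
 *	{
 *		struct foo_devres *dr;
 *
 *		dr = devres_alloc(foo_devres_release, sizeof(*dr), GFP_KERNEL);
 *		if (!dr)
 *			return -ENOMEM;
 *
 *		foo_hw_start(hw);
 *		dr->hw = hw;
 *		devres_add(dev, dr);
 *		return 0;
 *	}
 */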

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
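
/*
 * Illustrative sketch only (hypothetical "foo" names): devres_get() suits
 * lazily created, per-device singletons. The caller passes in a freshly
 * allocated candidate; if an equivalent record already exists the
 * candidate is freed and the existing record's data is returned. With a
 * NULL @match, any record using the same release function matches.
 *
 *	static void foo_cache_release(struct device *dev, void *res)
 *	{
 *	}
 *
 *	static struct foo_cache *foo_cache_get(struct device *dev)
 *	{
 *		struct foo_cache *new;
 *
 *		new = devres_alloc(foo_cache_release, sizeof(*new), GFP_KERNEL);
 *		if (!new)
 *			return NULL;
 *
 *		return devres_get(dev, new, NULL, NULL);
 *	}
 */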

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed. The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);


/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);
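
/*
 * Illustrative sketch only, reusing the hypothetical foo_devres record
 * from the devres_add() sketch above: a driver that must drop a managed
 * resource before detach can do so explicitly. devres_release() runs the
 * release callback and frees the record; devres_destroy() only frees the
 * record and leaves the actual cleanup to the caller.
 *
 *	static int foo_devres_match(struct device *dev, void *res, void *data)
 *	{
 *		struct foo_devres *this = res;
 *
 *		return this->hw == data;
 *	}
 *
 *	static void foo_release_early(struct device *dev, struct foo_hw *hw)
 *	{
 *		WARN_ON(devres_release(dev, foo_devres_release,
 *				       foo_devres_match, hw));
 *	}
 */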

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id. If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id. If @id is NULL, the latest
 * open group is selected. Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
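
/*
 * Illustrative sketch only (hypothetical "foo" names): groups make it
 * possible to roll back a bounded slice of managed allocations, e.g. when
 * an optional feature fails to initialize without failing the whole
 * probe. foo_feature_setup() is assumed to allocate exclusively through
 * devm_* interfaces.
 *
 *	static int foo_init_feature(struct device *dev)
 *	{
 *		void *group;
 *		int ret;
 *
 *		group = devres_open_group(dev, NULL, GFP_KERNEL);
 *		if (!group)
 *			return -ENOMEM;
 *
 *		ret = foo_feature_setup(dev);
 *		if (ret) {
 *			devres_release_group(dev, group);
 *			return ret;
 *		}
 *
 *		devres_close_group(dev, group);
 *		return 0;
 *	}
 */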

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
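
/*
 * Illustrative sketch only: devm_add_action() hooks an arbitrary undo step
 * into the managed unwind, here disabling a clock that was enabled by
 * hand (clk_prepare_enable()/clk_disable_unprepare() from <linux/clk.h>;
 * the probe helper itself is hypothetical). Note that if devm_add_action()
 * itself fails, the caller still owns the undo step - that is what
 * devm_add_action_or_reset() is for.
 *
 *	static void foo_clk_disable(void *data)
 *	{
 *		clk_disable_unprepare(data);
 *	}
 *
 *	static int foo_enable_clk(struct device *dev, struct clk *clk)
 *	{
 *		int ret;
 *
 *		ret = clk_prepare_enable(clk);
 *		if (ret)
 *			return ret;
 *
 *		return devm_add_action(dev, foo_clk_disable, clk);
 *	}
 */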
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * devm_remove_action() - removes previously added custom action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * @dev: Device that owns the action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * @action: Function implementing the action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * @data: Pointer to data passed to @action implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * Removes instance of @action previously added by devm_add_action().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * Both action and data should match one of the existing entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct action_devres devres = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) .data = data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) .action = action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) &devres));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) EXPORT_SYMBOL_GPL(devm_remove_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * devm_release_action() - release previously added custom action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * @dev: Device that owns the action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * @action: Function implementing the action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * @data: Pointer to data passed to @action implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * Releases and removes instance of @action previously added by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * devm_add_action(). Both action and data should match one of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * existing entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) void devm_release_action(struct device *dev, void (*action)(void *), void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct action_devres devres = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) .data = data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) .action = action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) &devres));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) EXPORT_SYMBOL_GPL(devm_release_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * Managed kmalloc/kfree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static void devm_kmalloc_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) /* noop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static int devm_kmalloc_match(struct device *dev, void *res, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return res == data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * devm_kmalloc - Resource-managed kmalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * @dev: Device to allocate memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * @size: Allocation size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * @gfp: Allocation gfp flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * Managed kmalloc. Memory allocated with this function is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * automatically freed on driver detach. Like all other devres resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * the buffer is aligned to ARCH_KMALLOC_MINALIGN, as with plain kmalloc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * Pointer to allocated memory on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct devres *dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (unlikely(!size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return ZERO_SIZE_PTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* use raw alloc_dr for kmalloc caller tracing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (unlikely(!dr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * This is named devm_kzalloc_release for historical reasons: the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * initial implementation supported only kzalloc, not kmalloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) devres_add(dev, dr->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return dr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) EXPORT_SYMBOL_GPL(devm_kmalloc);
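/*
 * Usage sketch (illustrative only): a hypothetical probe function allocating
 * per-device state that lives until driver detach.  struct foo_priv and
 * foo_probe() are invented names.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kmalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 *
 * No explicit kfree() is needed; the buffer is released together with the
 * other devres entries when the driver is unbound.  devm_kzalloc() is the
 * zeroing variant built on top of this function.
 */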
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * devm_krealloc - Resource-managed krealloc()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * @dev: Device to re-allocate memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * @ptr: Pointer to the memory chunk to re-allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * @new_size: New allocation size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * @gfp: Allocation gfp flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * change the order in which the release callback for the re-alloc'ed devres
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * will be called (except when falling back to devm_kmalloc() or when freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * resources when new_size is zero). The contents of the memory are preserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * up to the lesser of new and old sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) size_t total_new_size, total_old_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct devres *old_dr, *new_dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (unlikely(!new_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) devm_kfree(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return ZERO_SIZE_PTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (unlikely(ZERO_OR_NULL_PTR(ptr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return devm_kmalloc(dev, new_size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * We cannot reliably realloc a const string returned by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * devm_kstrdup_const().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!check_dr_size(new_size, &total_new_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) total_old_size = ksize(container_of(ptr, struct devres, data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (total_old_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) WARN(1, "Pointer doesn't point to dynamically allocated memory.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * If the new size is smaller than or equal to the actual number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * bytes previously allocated, just return the same pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (total_new_size <= total_old_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Otherwise: allocate a new, larger chunk. We need to allocate before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * taking the lock because the caller most probably uses GFP_KERNEL, which may sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) new_dr = alloc_dr(devm_kmalloc_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) total_new_size, gfp, dev_to_node(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!new_dr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * The spinlock protects the linked list against concurrent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * modifications but not the resource itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) spin_lock_irqsave(&dev->devres_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (!old_dr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) spin_unlock_irqrestore(&dev->devres_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) kfree(new_dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) WARN(1, "Memory chunk not managed or managed by a different device.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) replace_dr(dev, &old_dr->node, &new_dr->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spin_unlock_irqrestore(&dev->devres_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * We can copy the memory contents after releasing the lock as we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * no longer modifying the list links.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) memcpy(new_dr->data, old_dr->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) total_old_size - offsetof(struct devres, data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * Same for releasing the old devres - it's now been removed from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * list. This is also the reason why we must not use devm_kfree() - the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * links are no longer valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) kfree(old_dr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return new_dr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) EXPORT_SYMBOL_GPL(devm_krealloc);
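/*
 * Usage sketch (illustrative only): growing a managed array.  new_items,
 * new_count, priv->items and priv->n_items are invented names.
 *
 *	new_items = devm_krealloc(dev, priv->items,
 *				  new_count * sizeof(*new_items), GFP_KERNEL);
 *	if (!new_items)
 *		return -ENOMEM;
 *	priv->items = new_items;
 *	priv->n_items = new_count;
 *
 * On failure the original buffer is left untouched and still managed; on
 * success the old pointer must no longer be used.
 */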
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * devm_kstrdup - Allocate resource managed space and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * copy an existing string into it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * @dev: Device to allocate memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * @s: the string to duplicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * @gfp: the GFP mask used in the devm_kmalloc() call when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * allocating memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Pointer to allocated string on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) size = strlen(s) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) buf = devm_kmalloc(dev, size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) memcpy(buf, s, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) EXPORT_SYMBOL_GPL(devm_kstrdup);
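/*
 * Usage sketch (illustrative only): keeping a managed copy of a string whose
 * backing storage may go away, e.g. a name obtained from firmware.  priv and
 * name are invented names.
 *
 *	priv->name = devm_kstrdup(dev, name, GFP_KERNEL);
 *	if (!priv->name)
 *		return -ENOMEM;
 *
 * The copy is freed automatically on driver detach.
 */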
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * devm_kstrdup_const - resource managed conditional string duplication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * @dev: device for which to duplicate the string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * @s: the string to duplicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * @gfp: the GFP mask used in the kmalloc() call when allocating memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * Strings allocated by devm_kstrdup_const will be automatically freed when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * the associated device is detached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * Source string if it is in the .rodata section, otherwise it falls back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * to devm_kstrdup().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (is_kernel_rodata((unsigned long)s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return devm_kstrdup(dev, s, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) EXPORT_SYMBOL_GPL(devm_kstrdup_const);
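/*
 * Usage sketch (illustrative only): when the source string is often a
 * compile-time literal (which lives in .rodata), devm_kstrdup_const() avoids
 * the copy and simply returns the literal itself; only strings outside
 * .rodata are duplicated.  Such pointers should be released with devm_kfree()
 * (or left to driver detach), never with plain kfree().  priv->label and
 * label are invented names.
 *
 *	priv->label = devm_kstrdup_const(dev, label, GFP_KERNEL);
 *	if (!priv->label)
 *		return -ENOMEM;
 */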
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * devm_kvasprintf - Allocate resource managed space and format a string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * into it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * @dev: Device to allocate memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * @gfp: the GFP mask used in the devm_kmalloc() call when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * allocating memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * @fmt: The printf()-style format string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * @ap: Arguments for the format string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * Pointer to allocated string on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) va_list ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) va_list aq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) va_copy(aq, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) len = vsnprintf(NULL, 0, fmt, aq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) va_end(aq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) p = devm_kmalloc(dev, len+1, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) vsnprintf(p, len+1, fmt, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) EXPORT_SYMBOL(devm_kvasprintf);
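/*
 * Usage sketch (illustrative only): a hypothetical varargs helper that
 * builds a managed string from a format string; my_set_label() is an
 * invented name.
 *
 *	static char *my_set_label(struct device *dev, const char *fmt, ...)
 *	{
 *		va_list args;
 *		char *label;
 *
 *		va_start(args, fmt);
 *		label = devm_kvasprintf(dev, GFP_KERNEL, fmt, args);
 *		va_end(args);
 *
 *		return label;
 *	}
 *
 * devm_kasprintf() below is exactly this pattern, exported for general use.
 */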
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * devm_kasprintf - Allocate resource managed space and format a string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * into it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * @dev: Device to allocate memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * @gfp: the GFP mask used in the devm_kmalloc() call when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * allocating memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * @fmt: The printf()-style format string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * @...: Arguments for the format string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * Pointer to allocated string on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) va_list ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) va_start(ap, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) p = devm_kvasprintf(dev, gfp, fmt, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) va_end(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) EXPORT_SYMBOL_GPL(devm_kasprintf);
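/*
 * Usage sketch (illustrative only): building a managed, formatted name.
 * priv and priv->irq_name are invented names.
 *
 *	priv->irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
 *					dev_name(dev));
 *	if (!priv->irq_name)
 *		return -ENOMEM;
 *
 * The string is freed automatically on driver detach.
 */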
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * devm_kfree - Resource-managed kfree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * @dev: Device this memory belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * @p: Memory to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * Free memory allocated with devm_kmalloc().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) void devm_kfree(struct device *dev, const void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * Special cases: pointer to a string in .rodata returned by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * devm_kstrdup_const() or NULL/ZERO ptr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) rc = devres_destroy(dev, devm_kmalloc_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) devm_kmalloc_match, (void *)p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) EXPORT_SYMBOL_GPL(devm_kfree);
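/*
 * Usage sketch (illustrative only): freeing a managed buffer early, before
 * driver detach, e.g. a buffer that was only needed during probe.
 * priv->tmp is an invented name.
 *
 *	devm_kfree(dev, priv->tmp);
 *	priv->tmp = NULL;
 *
 * Passing NULL, ZERO_SIZE_PTR or a devm_kstrdup_const() pointer into .rodata
 * is a no-op, mirroring the checks above.
 */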
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * devm_kmemdup - Resource-managed kmemdup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * @dev: Device this memory belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * @src: Memory region to duplicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * @len: Memory region length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * @gfp: GFP mask to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * Duplicate a region of memory using resource managed kmalloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) p = devm_kmalloc(dev, len, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) memcpy(p, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) EXPORT_SYMBOL_GPL(devm_kmemdup);
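/*
 * Usage sketch (illustrative only): keeping a managed copy of a
 * configuration blob whose original buffer is about to be released.
 * priv->cfg, blob and blob_len are invented names.
 *
 *	priv->cfg = devm_kmemdup(dev, blob, blob_len, GFP_KERNEL);
 *	if (!priv->cfg)
 *		return -ENOMEM;
 */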
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct pages_devres {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) unsigned int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static int devm_pages_match(struct device *dev, void *res, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct pages_devres *devres = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct pages_devres *target = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return devres->addr == target->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void devm_pages_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct pages_devres *devres = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) free_pages(devres->addr, devres->order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * devm_get_free_pages - Resource-managed __get_free_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * @dev: Device to allocate memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * @gfp_mask: Allocation gfp flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * @order: Allocation size is (1 << order) pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * Managed get_free_pages. Memory allocated with this function is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * automatically freed on driver detach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * Address of allocated memory on success, 0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) unsigned long devm_get_free_pages(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) gfp_t gfp_mask, unsigned int order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct pages_devres *devres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) addr = __get_free_pages(gfp_mask, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (unlikely(!addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) devres = devres_alloc(devm_pages_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) sizeof(struct pages_devres), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (unlikely(!devres)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) free_pages(addr, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) devres->addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) devres->order = order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) devres_add(dev, devres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) EXPORT_SYMBOL_GPL(devm_get_free_pages);
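/*
 * Usage sketch (illustrative only): allocating a multi-page, page-aligned
 * buffer in probe.  priv->buf and FOO_BUF_ORDER are invented names.
 *
 *	priv->buf = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO,
 *					FOO_BUF_ORDER);
 *	if (!priv->buf)
 *		return -ENOMEM;
 *
 * The pages are returned to the allocator automatically on driver detach.
 */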
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * devm_free_pages - Resource-managed free_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * @dev: Device this memory belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * @addr: Memory to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * there is no need to supply the @order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) void devm_free_pages(struct device *dev, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct pages_devres devres = { .addr = addr };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) &devres));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) EXPORT_SYMBOL_GPL(devm_free_pages);
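/*
 * Usage sketch (illustrative only): releasing the pages from the example
 * above before detach.  Only the address is needed; the order recorded at
 * allocation time is reused by devm_pages_release().
 *
 *	devm_free_pages(dev, priv->buf);
 *	priv->buf = 0;
 */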
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static void devm_percpu_release(struct device *dev, void *pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) void __percpu *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) p = *(void __percpu **)pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) free_percpu(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) static int devm_percpu_match(struct device *dev, void *data, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct devres *devr = container_of(data, struct devres, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return *(void **)devr->data == p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * __devm_alloc_percpu - Resource-managed alloc_percpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * @dev: Device to allocate per-cpu memory for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * @size: Size of per-cpu memory to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * @align: Alignment of per-cpu memory to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * Managed alloc_percpu. Per-cpu memory allocated with this function is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * automatically freed on driver detach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * Pointer to allocated memory on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) size_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) void __percpu *pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) pcpu = __alloc_percpu(size, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (!pcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) free_percpu(pcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) *(void __percpu **)p = pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) devres_add(dev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
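/*
 * Usage sketch (illustrative only): drivers normally call this through the
 * devm_alloc_percpu() macro, which passes the size and alignment of the
 * given type.  struct foo_stats, priv->stats and rx_packets are invented
 * names.
 *
 *	priv->stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!priv->stats)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(priv->stats->rx_packets);
 *
 * The per-cpu allocation is freed automatically on driver detach.
 */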
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * devm_free_percpu - Resource-managed free_percpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * @dev: Device this memory belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * @pdata: Per-cpu memory to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * Free memory allocated with devm_alloc_percpu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) void devm_free_percpu(struct device *dev, void __percpu *pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) (void *)pdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) EXPORT_SYMBOL_GPL(devm_free_percpu);
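/*
 * Usage sketch (illustrative only): releasing the per-cpu memory from the
 * example above before detach, e.g. when a feature is disabled at runtime.
 *
 *	devm_free_percpu(dev, priv->stats);
 *	priv->stats = NULL;
 */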