// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"

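/*
 * Called with the result of the atomic sub/dec on cl->remaining: once the
 * remaining count hits zero, either re-queue the closure so the continuation
 * in cl->fn runs, or (on the CLOSURE_DESTRUCTOR path) run the destructor and
 * drop our ref on the parent.
 */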
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
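
/*
 * Example (see __closure_wake_up() below): CLOSURE_WAITING is cleared and
 * the waitlist's ref is dropped in a single atomic op with
 *
 *	closure_sub(cl, CLOSURE_WAITING + 1);
 */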

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}

/*
 * __closure_wake_up - wake up all closures on a wait list, without the
 * memory barrier that closure_wake_up() provides
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 *	      closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Return: true if @cl was added to @waitlist, false if @cl was already
 * waiting (CLOSURE_WAITING already set).
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
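
/*
 * Typical usage (sketch): the waiter parks itself on a waitlist and then
 * waits for the ref taken here to be dropped, e.g.
 *
 *	closure_wait(&waitlist, cl);
 *	closure_sync(cl);
 *
 * while the other side calls closure_wake_up(&waitlist), which releases
 * that ref and lets the waiter proceed.
 */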

struct closure_syncer {
	struct task_struct *task;
	int done;
};

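/*
 * Continuation installed by __closure_sync(): runs once all outstanding refs
 * on the closure have been dropped. s->task is read before s->done is set
 * because, once done is set, the waiter may return and free @s off its
 * stack; the RCU read-side section keeps the task valid for
 * wake_up_process().
 */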
static void closure_sync_fn(struct closure *cl)
{
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}

void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}

static struct dentry *closure_debug;

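/*
 * Dump every live closure: its address, the ip recorded by the last
 * continue_at()/set_closure_fn(), the current fn, parent and remaining
 * count, plus Q (work item pending), R (running) and, when waiting,
 * W with the caller that put it on a waitlist (cl->waiting_on).
 */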
static int debug_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING ? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_puts(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debug);

void __init closure_debug_init(void)
{
	if (!IS_ERR_OR_NULL(bcache_debug))
		/*
		 * The return value of debugfs_create_file() is deliberately
		 * not checked: debugfs failures are not fatal, so there is
		 * nothing useful to do with an error here.
		 */
		closure_debug = debugfs_create_file(
			"closures", 0400, bcache_debug, NULL, &debug_fops);
}
#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");