// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <trace/hooks/memory.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);
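/* Note: trampoline_mutex guards only the lookup table and trampoline
 * refcounts; per-trampoline state (progs_hlist, progs_cnt, cur_image,
 * selector) is protected by the per-trampoline tr->mutex.
 */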

void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep the image writable. The alternative is to flip it ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	trace_android_vh_set_memory_x((unsigned long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

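/* Find the trampoline for @key or lazily allocate one. The key is computed
 * by the caller from the attach target (typically the target's BTF id,
 * combined with the target prog id for freplace programs). The returned
 * trampoline holds a reference that must be dropped with
 * bpf_trampoline_put().
 */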
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

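/* Returns 1 if @ip is an ftrace-managed patch site, 0 if it is not, and
 * -EFAULT if ftrace knows the function but reports the patch site at a
 * different address than @ip.
 */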
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

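/* The {register,modify,unregister}_fentry() helpers below update the call
 * site at the start of the traced function. When the site is managed by
 * ftrace they go through the ftrace direct-call API so both facilities
 * stay in sync; otherwise the kernel text is patched directly via
 * bpf_arch_text_poke().
 */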
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	return ret;
}

static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
			*progs++ = aux->prog;
	}
	return tprogs;
}

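/* Final teardown step: runs from a workqueue once all RCU/percpu_ref
 * grace periods initiated by bpf_tramp_image_put() have elapsed, so the
 * image page can no longer be executing and is safe to unmap and free.
 */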
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	trace_android_vh_set_memory_nx((unsigned long)im->image, 1);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect the trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are a few asm insns before __bpf_tramp_enter and
	 * after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPTION=y use call_rcu_tasks() to wait for
	 * the first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure a few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPTION case a task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

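	/* No progs are attached to this trampoline anymore: detach the
	 * fentry patch site and drop the current image.
	 */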
	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

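	/* cur_image and selector must change together: a live image implies
	 * at least one prior successful update, i.e. a non-zero selector.
	 */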
	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void; we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* Cannot attach fentry/fexit if an extension prog is
		 * attached. Cannot overwrite the extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

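/* Get a refcounted trampoline for @key and, on first use, latch the
 * target's function model and address from @tgt_info. Later callers with
 * the same key reuse the cached func.addr.
 */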
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

/* The logic is similar to BPF_PROG_RUN, but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 */
u64 notrace __bpf_prog_enter(void)
	__acquires(RCU)
{
	u64 start = 0;

	rcu_read_lock();
	migrate_disable();
	if (static_branch_unlikely(&bpf_stats_enabled_key))
		start = sched_clock();
	return start;
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter
	     * and disabled in __bpf_prog_exit.
	     * And vice versa.
	     * Hence check that 'start' is not zero.
	     */
	    start) {
		stats = this_cpu_ptr(prog->aux->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
	migrate_enable();
	rcu_read_unlock();
}

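/* Sleepable progs run under rcu_read_lock_trace() instead of plain RCU
 * so they are allowed to fault; might_fault() is a debug annotation
 * asserting that faulting is legal in this context.
 */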
void notrace __bpf_prog_enter_sleepable(void)
{
	rcu_read_lock_trace();
	might_fault();
}

void notrace __bpf_prog_exit_sleepable(void)
{
	rcu_read_unlock_trace();
}

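/* Called from the generated trampoline itself, bracketing the call to the
 * original function: the percpu_ref pins the image while the original
 * function (which may sleep indefinitely) is on the stack.
 */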
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

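/* Weak fallback for architectures without trampoline support; arch code
 * (e.g. x86) overrides this to emit the actual trampoline into
 * [image, image_end).
 */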
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);