// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU. The remote
 * CPU will do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs for this stage. This
 * basically allows for 10G wirespeed pre-filtering via bpf.
 */
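
/* Illustrative sketch (not part of this file; names are hypothetical): a
 * minimal BPF-side user of this map type. It assumes a cpumap defined via
 * a libbpf BTF-style map definition, and redirects every packet to a fixed
 * destination CPU:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_cpumap_val));
 *		__uint(max_entries, 16);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_cpu(struct xdp_md *ctx)
 *	{
 *		__u32 dest_cpu = 0;
 *
 *		return bpf_redirect_map(&cpu_map, dest_cpu, 0);
 *	}
 *
 * The map key is the destination CPU number.
 */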
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>	/* netif_receive_skb_core */
#include <linux/etherdevice.h>	/* eth_type_trans */

/* General idea: XDP packets that get redirected to another CPU will be
 * stored/queued for at most one driver ->poll() call. It is guaranteed
 * that queueing the frame and the flush operation happen on the same
 * CPU. Thus, the cpu_map_flush operation can deduce via this_cpu_ptr()
 * which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need a __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	struct bpf_cpu_map *cmap;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	atomic_t refcnt; /* Control when this struct can be freed */
	struct rcu_head rcu;

	struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;
	u64 cost;
	int ret;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* make sure page count doesn't overflow */
	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);

	/* Notice, this returns -EPERM if the map size is larger than the memlock limit */
	ret = bpf_map_charge_init(&cmap->map.memory, cost);
	if (ret) {
		err = ret;
		goto free_cmap;
	}

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_charge;

	return &cmap->map;
free_charge:
	bpf_map_charge_finish(&cmap->map.memory);
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}
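
/* Illustrative user-space sketch (assumes the libbpf v0.7+ bpf_map_create()
 * API; not part of this file): creating a cpumap whose value carries both
 * qsize and the optional per-entry program fd. value_size may alternatively
 * be just offsetofend(struct bpf_cpumap_val, qsize), i.e. 4 bytes:
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
 *				    sizeof(__u32),
 *				    sizeof(struct bpf_cpumap_val),
 *				    16, NULL);
 */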

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* Called from a workqueue, to work around the syscall path using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for the flush in __cpu_map_entry_free() via a full RCU barrier,
	 * which waits until all in-flight call_rcu() callbacks complete.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
					 struct sk_buff *skb)
{
	unsigned int hard_start_headroom;
	unsigned int frame_size;
	void *pkt_data_start;

	/* Part of headroom was reserved for xdpf */
	hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;

	/* The memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	pkt_data_start = xdpf->data - hard_start_headroom;
	skb = build_skb_around(skb, pkt_data_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hard_start_headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that the queue is
	 * empty. See __cpu_map_entry_replace() and the work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		if (rcpu->prog)
			bpf_prog_put(rcpu->prog);
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	if (!rcpu->prog)
		return n;

	rcu_read_lock_bh();

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	if (stats->redirect)
		xdp_do_flush_map();

	xdp_clear_return_frame_no_direct();

	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

	return nframes;
}
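
/* Illustrative sketch of a program run by the function above (loaded with
 * expected_attach_type BPF_XDP_CPUMAP; the SEC() name shown is an
 * assumption and depends on the libbpf version in use). Its verdicts map
 * to the switch above: XDP_PASS builds an SKB on this CPU, XDP_REDIRECT
 * forwards the frame once more, XDP_DROP frees it:
 *
 *	SEC("xdp/cpumap")
 *	int cpumap_prog(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}
 */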

#define CPUMAP_BATCH 8

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is given the stop order, the rcpu has already been
	 * disconnected from the map, thus no new packets can enter. Remaining
	 * in-flight per-CPU stored packets are flushed to this queue. Wait,
	 * honoring the kthread_stop signal, until the queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		unsigned int drops = 0, sched = 0;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		int i, n, m, nframes;

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned. Lockless access to the ptr_ring
		 * consume side is valid, as no resize of the queue is allowed.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU. Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed, written by the page_frag_free call.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];
			int ret;

			skb = cpu_map_build_skb(xdpf, skb);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			/* Inject into network stack */
			ret = netif_receive_skb_core(skb);
			if (ret == NET_RX_DROP)
				drops++;
		}
		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}

bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_CPUMAP &&
	       map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}
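
/* Illustrative user-space sketch (assumed libbpf API, not part of this
 * file): before loading, mark the program so the check above accepts it,
 * then pass its fd in bpf_cpumap_val.bpf_prog.fd on map update. Newer
 * libbpf versions can set the attach type automatically from the
 * section name:
 *
 *	bpf_program__set_expected_attach_type(prog, BPF_XDP_CPUMAP);
 *	...
 *	int cpumap_prog_fd = bpf_program__fd(prog);
 */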

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
					 sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu = cpu;
	rcpu->map_id = map_id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu, map_id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace-period has elapsed. Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here, the last put frees rcpu resources */
	put_cpu_map_entry(rcpu);
}

/* After xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet, because the driver-side
 * rcu_read_lock/unlock only protects the running XDP program. The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() makes sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq). A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees memory
 * resources.
 *
 * The rcu callback __cpu_map_entry_free flushes remaining packets in
 * the percpu bulkq to the queue. Because the caller map_delete_elem()
 * disables preemption, we cannot call kthread_stop() here to make sure
 * the queue is empty. Instead a work_queue is started for stopping the
 * kthread, cpu_map_kthread_stop, which waits for an RCU grace period
 * before stopping the kthread, emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses preempt_disable() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
		if (!rcpu)
			return -ENOMEM;
		rcpu->cmap = cmap;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
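
/* Illustrative user-space sketch (not part of this file; map_fd and
 * cpumap_prog_fd are hypothetical): set up CPU 2 as a destination with a
 * 2048-slot ptr_ring, optionally running a second XDP program on the
 * kthread of that CPU:
 *
 *	struct bpf_cpumap_val val = {
 *		.qsize = 2048,
 *		.bpf_prog.fd = cpumap_prog_fd,
 *	};
 *	__u32 key_cpu = 2;
 *
 *	bpf_map_update_elem(map_fd, &key_cpu, &val, BPF_ANY);
 */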

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete. The rcu critical section
	 * only guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map. It does __not__ ensure pending flush
	 * operations (if any) are complete.
	 */

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* For cpu_map the remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = READ_ONCE(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happens after RCU grace-period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = READ_ONCE(cmap->cpu_map[key]);
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = cpu_map_alloc,
	.map_free = cpu_map_free,
	.map_delete_elem = cpu_map_delete_elem,
	.map_update_elem = cpu_map_update_elem,
	.map_lookup_elem = cpu_map_lookup_elem,
	.map_get_next_key = cpu_map_get_next_key,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_cpu_map",
	.map_btf_id = &cpu_map_btf_id,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, percpu variable access is safe.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for the
	 * driver code invoking us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing the napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}
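
/* Illustrative driver-side sketch (driver names are hypothetical): the
 * per-CPU flush list above is drained from the driver's NAPI poll loop,
 * which calls xdp_do_flush_map() after its RX processing; that in turn
 * ends up here in __cpu_map_flush():
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = mydrv_clean_rx_irq(napi, budget);
 *
 *		xdp_do_flush_map();
 *		return work_done;
 *	}
 */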

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);