Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

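/* Sentinel gate mask used while no schedule is running: all bits set,
 * so every gate is treated as open (see taprio_dequeue_soft()).
 */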
#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	struct list_head list;

	/* The instant at which this entry "closes" and the next one
	 * should open; the qdisc makes some effort to ensure that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

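/* Convert a CLOCK_MONOTONIC timestamp into the qdisc's configured time
 * base; TK_OFFS_MAX is used to mean "no conversion needed".
 */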
static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

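/* RCU callback: free a gate schedule together with all of its entries
 * once no readers can still be referencing it.
 */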
static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	if (!sched)
		return;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

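/* Promote the admin (pending) schedule to oper (active) and free the
 * old oper schedule after an RCU grace period.
 */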
static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

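/* Convert a frame length in bytes into its transmission time in
 * nanoseconds at the current link speed.
 */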
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet
 *       can be transmitted before it closes: schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of the packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

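/* Enqueue: hand the skb to the child qdisc of its TX queue. For
 * SO_TXTIME sockets the user-supplied tstamp is validated against the
 * gate schedule; with txtime-assist enabled, skb->tstamp is computed
 * here instead. Packets that can never make a gate are dropped.
 */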
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

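/* Peek path for the software data path: return the first queued skb
 * whose traffic-class gate is currently open.
 */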
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

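/* Peek path for full offload: gate scheduling is handled by the
 * hardware, so simply return the first skb found across the children.
 */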
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}

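/* The budget is the number of bytes that fit into this entry's
 * interval at the current link speed:
 * interval [ns] * 1000 / picos_per_byte.
 */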
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}

static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* If there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open; this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			skb = NULL;
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			skb = NULL;
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			skb = NULL;
			continue;
		}

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}

static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

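/* A cycle restarts either when the last entry of the schedule finishes
 * or when an entry's close time coincides with the end of the cycle.
 */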
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

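/* hrtimer callback: advance to the next gate entry (or switch to a new
 * schedule), re-arm the timer for that entry's close time and kick the
 * qdisc so it re-evaluates what can be transmitted.
 */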
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. the previous schedule just ended. The first
	 * entry of each schedule is pre-calculated during schedule
	 * initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

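/* Netlink attribute policies: one for a single schedule entry, one for
 * the taprio qdisc itself.
 */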
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		.len = sizeof(struct tc_mqprio_qopt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			    struct sched_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	int min_duration = length_to_duration(q, ETH_ZLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	u32 interval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		entry->command = nla_get_u8(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		entry->gate_mask = nla_get_u32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		interval = nla_get_u32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	/* The interval should allow at least the minimum ethernet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 * frame to go out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (interval < min_duration) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	entry->interval = interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
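
/* Worked example (illustrative, values assumed): on a 1 Gb/s link,
 * picos_per_byte = (USEC_PER_SEC * 8) / 1000 = 8000, so the duration of
 * a minimum-sized frame (ETH_ZLEN = 60 bytes) is roughly
 * 60 * 8000 ps = 480000 ps ~= 480 ns.  Any entry with a shorter
 * 'interval' is rejected above with -EINVAL.
 */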
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			     struct sched_entry *entry, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			     struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 					  entry_policy, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	entry->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	return fill_sched_entry(q, tb, entry, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			    struct sched_gate_list *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct nlattr *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	int err, rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (!list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	nla_for_each_nested(n, list, rem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		struct sched_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		if (!entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		err = parse_sched_entry(q, n, entry, i, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		list_add_tail(&entry->list, &sched->entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	sched->num_entries = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				 struct sched_gate_list *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 				 struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 				       new, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	if (!new->cycle_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		struct sched_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		ktime_t cycle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		list_for_each_entry(entry, &new->entries, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			cycle = ktime_add_ns(cycle, entry->interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		if (!cycle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		new->cycle_time = cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) static int taprio_parse_mqprio_opt(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 				   struct tc_mqprio_qopt *qopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 				   struct netlink_ext_ack *extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 				   u32 taprio_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (!qopt && !dev->num_tc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/* If num_tc is already set, it means that the user already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 * configured the mqprio part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (dev->num_tc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/* Verify num_tc is not out of max range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (qopt->num_tc > TC_MAX_QUEUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	/* taprio imposes that traffic classes map 1:n to tx queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (qopt->num_tc > dev->num_tx_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	/* Verify priority mapping uses valid tcs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	for (i = 0; i <= TC_BITMASK; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	for (i = 0; i < qopt->num_tc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		unsigned int last = qopt->offset[i] + qopt->count[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		/* Verify the queue range fits within the TX queues: an end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		 * equal to real_num_tx_queues means the last queue is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		if (qopt->offset[i] >= dev->num_tx_queues ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		    !qopt->count[i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		    last > dev->real_num_tx_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		/* Verify that the offset and counts do not overlap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		for (j = i + 1; j < qopt->num_tc; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			if (last > qopt->offset[j]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) }
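
/* A minimal valid mapping, assuming a device with 4 TX queues:
 * num_tc = 2, count = {2, 2}, offset = {0, 2}.  TC 0 owns queues 0-1,
 * TC 1 owns queues 2-3; the ranges do not overlap, and the last queue
 * in use (offset + count = 4) does not exceed real_num_tx_queues.
 */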
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static int taprio_get_start_time(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 				 struct sched_gate_list *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 				 ktime_t *start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	struct taprio_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	ktime_t now, base, cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	s64 n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	base = sched_base_time(sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	now = taprio_get_time(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	if (ktime_after(base, now)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		*start = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	cycle = sched->cycle_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/* The qdisc is expected to have at least one sched_entry.  Moreover,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * something went really wrong. In that case, we should warn about this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * inconsistent state and return an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	if (WARN_ON(!cycle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/* Schedule the start time for the beginning of the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	n = div64_s64(ktime_sub_ns(now, base), cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	*start = ktime_add_ns(base, (n + 1) * cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
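
/* Worked example (illustrative, values assumed): with base = 1 ms,
 * now = 3.5 ms and cycle = 1 ms, n = div64_s64(2.5 ms, 1 ms) = 2 and
 * *start = base + (n + 1) * cycle = 4 ms, the first cycle boundary
 * strictly after 'now'.
 */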
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static void setup_first_close_time(struct taprio_sched *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				   struct sched_gate_list *sched, ktime_t base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct sched_entry *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	ktime_t cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	first = list_first_entry(&sched->entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				 struct sched_entry, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	cycle = sched->cycle_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	/* FIXME: find a better place to do this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	sched->cycle_close_time = ktime_add_ns(base, cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	first->close_time = ktime_add_ns(base, first->interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	taprio_set_budget(q, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	rcu_assign_pointer(q->current_entry, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static void taprio_start_sched(struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			       ktime_t start, struct sched_gate_list *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct taprio_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	ktime_t expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	expires = hrtimer_get_expires(&q->advance_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (expires == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		expires = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	/* If the new schedule starts before the next expiration, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	 * reprogram the timer to the earlier of the two, so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	 * admin schedule is switched to operational at the right time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	start = min_t(ktime_t, start, expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
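
/* Example (values assumed): if the advance timer is already armed for
 * t = 10 ms and the new schedule has start = 7 ms, the timer is
 * re-armed for 7 ms; an inactive timer reports expires == 0, which is
 * treated as KTIME_MAX so 'start' always wins in that case.
 */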
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static void taprio_set_picos_per_byte(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 				      struct taprio_sched *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct ethtool_link_ksettings ecmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	int speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	int picos_per_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	err = __ethtool_get_link_ksettings(dev, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		speed = ecmd.base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	picos_per_byte = (USEC_PER_SEC * 8) / speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	atomic64_set(&q->picos_per_byte, picos_per_byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		   speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
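
/* The conversion above: one byte is 8 bits, so at 'speed' Mbit/s a byte
 * takes 8 / (speed * 10^6) seconds = (USEC_PER_SEC * 8) / speed
 * picoseconds.  For example, speed = SPEED_1000 gives 8000 ps (8 ns)
 * per byte, and the SPEED_10 fallback gives 800000 ps.
 */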
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			       void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct net_device *qdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	struct taprio_sched *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (event != NETDEV_UP && event != NETDEV_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	spin_lock(&taprio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	list_for_each_entry(q, &taprio_list, taprio_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		qdev = qdisc_dev(q->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		if (qdev == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	spin_unlock(&taprio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		taprio_set_picos_per_byte(dev, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void setup_txtime(struct taprio_sched *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			 struct sched_gate_list *sched, ktime_t base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	struct sched_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	u32 interval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	list_for_each_entry(entry, &sched->entries, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		entry->next_txtime = ktime_add_ns(base, interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		interval += entry->interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
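
/* Example (intervals assumed): for entries of 300 us, 300 us and 400 us
 * starting at 'base', the loop above assigns next_txtime = base,
 * base + 300 us and base + 600 us respectively, i.e. each entry's first
 * transmission time within the cycle.
 */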
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	struct __tc_taprio_qopt_offload *__offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (!__offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	refcount_set(&__offload->users, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	return &__offload->offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
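
/* struct_size() above sizes the allocation as sizeof(*__offload) plus
 * num_entries trailing elements of offload.entries[], with overflow
 * checking; the refcount starts at 1 for the caller and is dropped via
 * taprio_offload_free().
 */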
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 						  *offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	struct __tc_taprio_qopt_offload *__offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				 offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	refcount_inc(&__offload->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	return offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) EXPORT_SYMBOL_GPL(taprio_offload_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	struct __tc_taprio_qopt_offload *__offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				 offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	if (!refcount_dec_and_test(&__offload->users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	kfree(__offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) EXPORT_SYMBOL_GPL(taprio_offload_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* This function only serves to keep the pointers to the "oper" and "admin"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * schedules valid in relation to their base times, so that when dump() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  * called, users see the right schedules.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  * When using full offload, the admin configuration is promoted to oper at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  * base_time in the PHC time domain.  But because the system time is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  * necessarily in sync with that, we can't just trigger a hrtimer to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)  * switch_schedules at the right hardware time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)  * At the moment we call this by hand right away from taprio, but in the future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)  * it will be useful to create a mechanism for drivers to notify taprio of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)  * This is left as TODO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static void taprio_offload_config_changed(struct taprio_sched *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	struct sched_gate_list *oper, *admin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	spin_lock(&q->current_entry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	oper = rcu_dereference_protected(q->oper_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 					 lockdep_is_held(&q->current_entry_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	admin = rcu_dereference_protected(q->admin_sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 					  lockdep_is_held(&q->current_entry_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	switch_schedules(q, &admin, &oper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	spin_unlock(&q->current_entry_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	u32 i, queue_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	for (i = 0; i < dev->num_tc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		u32 offset, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		if (!(tc_mask & BIT(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		offset = dev->tc_to_txq[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		count = dev->tc_to_txq[i].count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		queue_mask |= GENMASK(offset + count - 1, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	return queue_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
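
/* Worked example (tc_to_txq layout assumed): with TC 0 -> 1 queue at
 * offset 0 and TC 1 -> 2 queues at offset 1, tc_mask = BIT(1) yields
 * queue_mask = GENMASK(2, 1) = 0x6, i.e. TX queues 1 and 2.
 */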
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static void taprio_sched_to_offload(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 				    struct sched_gate_list *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 				    struct tc_taprio_qopt_offload *offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct sched_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	offload->base_time = sched->base_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	offload->cycle_time = sched->cycle_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	offload->cycle_time_extension = sched->cycle_time_extension;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	list_for_each_entry(entry, &sched->entries, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		struct tc_taprio_sched_entry *e = &offload->entries[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		e->command = entry->command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		e->interval = entry->interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	offload->num_entries = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int taprio_enable_offload(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 				 struct taprio_sched *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 				 struct sched_gate_list *sched,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 				 struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	const struct net_device_ops *ops = dev->netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	struct tc_taprio_qopt_offload *offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (!ops->ndo_setup_tc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			       "Device does not support taprio offload");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	offload = taprio_offload_alloc(sched->num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	if (!offload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			       "Not enough memory for enabling offload mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	offload->enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	taprio_sched_to_offload(dev, sched, offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			       "Device failed to setup taprio offload");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	taprio_offload_free(offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static int taprio_disable_offload(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				  struct taprio_sched *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 				  struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	const struct net_device_ops *ops = dev->netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	struct tc_taprio_qopt_offload *offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (!ops->ndo_setup_tc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	offload = taprio_offload_alloc(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (!offload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			       "Not enough memory to disable offload mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	offload->enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			       "Device failed to disable offload");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	taprio_offload_free(offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /* If full offload is enabled, the only possible clockid is the net device's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  * PHC. For that reason, specifying a clockid through netlink is incorrect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  * For txtime-assist, it is implicitly assumed that the device's PHC is kept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  * in sync with the specified clockid via a user space daemon such as phc2sys.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  * For both software taprio and txtime-assist, the clockid is used for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)  * hrtimer that advances the schedule and is hence mandatory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  */
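/* Illustrative invocation (iproute2 tc-taprio syntax; device name and
 * times are assumed):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *           num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 2@2 \
 *           base-time 1000000000 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 02 300000 \
 *           sched-entry S 04 400000 \
 *           clockid CLOCK_TAI
 *
 * With full offload (flags 0x2), 'clockid' must be omitted, as enforced
 * below.
 */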
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 				struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	struct taprio_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	struct net_device *dev = qdisc_dev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		const struct ethtool_ops *ops = dev->ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		struct ethtool_ts_info info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			.cmd = ETHTOOL_GET_TS_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			.phc_index = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				       "The 'clockid' cannot be specified for full offload");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		if (ops && ops->get_ts_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			err = ops->get_ts_info(dev, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		if (err || info.phc_index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 				       "Device does not have a PTP clock");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			err = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		enum tk_offsets tk_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		/* We only support static clockids and we don't allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		 * for it to be modified after the first init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		if (clockid < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		    (q->clockid != -1 && q->clockid != clockid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 				       "Changing the 'clockid' of a running schedule is not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			err = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		switch (clockid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		case CLOCK_REALTIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			tk_offset = TK_OFFS_REAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		case CLOCK_MONOTONIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			tk_offset = TK_OFFS_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		case CLOCK_BOOTTIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			tk_offset = TK_OFFS_BOOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		case CLOCK_TAI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			tk_offset = TK_OFFS_TAI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		/* This pairs with READ_ONCE() in taprio_mono_to_any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		WRITE_ONCE(q->tk_offset, tk_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		q->clockid = clockid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	/* Everything went ok, return success. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static int taprio_mqprio_cmp(const struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 			     const struct tc_mqprio_qopt *mqprio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (!mqprio || mqprio->num_tc != dev->num_tc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	for (i = 0; i < mqprio->num_tc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		    dev->tc_to_txq[i].offset != mqprio->offset[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	for (i = 0; i <= TC_BITMASK; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* The semantics of the 'flags' argument in relation to 'change()'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  * requests are interpreted following two rules (which are applied in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  * this order): (1) an omitted 'flags' argument is interpreted as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  * zero; (2) the 'flags' of a "running" taprio instance cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)  * changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)  */
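/* Per include/uapi/linux/pkt_sched.h, the valid flag bits checked below
 * are TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST (0x1) and
 * TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD (0x2).  For example, an instance
 * created with flags 0x2 must pass flags 0x2 again on every 'change()'
 * request; omitting the attribute is read as 0 and rejected with
 * -EOPNOTSUPP.
 */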
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static int taprio_new_flags(const struct nlattr *attr, u32 old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			    struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	u32 new = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	if (attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		new = nla_get_u32(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	if (old != TAPRIO_FLAGS_INVALID && old != new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (!taprio_flags_valid(new)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			 struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	struct sched_gate_list *oper, *admin, *new_admin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	struct taprio_sched *q = qdisc_priv(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct net_device *dev = qdisc_dev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct tc_mqprio_qopt *mqprio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	ktime_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 					  taprio_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			       q->flags, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	q->flags = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	if (!new_admin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	INIT_LIST_HEAD(&new_admin->entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	oper = rcu_dereference(q->oper_sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	admin = rcu_dereference(q->admin_sched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	/* no changes - no new mqprio settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (!taprio_mqprio_cmp(dev, mqprio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		mqprio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	if (mqprio && (oper || admin)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		err = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		goto free_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	err = parse_taprio_schedule(q, tb, new_admin, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		goto free_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (new_admin->num_entries == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		goto free_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	err = taprio_parse_clockid(sch, tb, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		goto free_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	taprio_set_picos_per_byte(dev, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	if (mqprio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		err = netdev_set_num_tc(dev, mqprio->num_tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			goto free_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		for (i = 0; i < mqprio->num_tc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			netdev_set_tc_queue(dev, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 					    mqprio->count[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 					    mqprio->offset[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		/* Always use supplied priority mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		for (i = 0; i <= TC_BITMASK; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			netdev_set_prio_tc_map(dev, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 					       mqprio->prio_tc_map[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		err = taprio_enable_offload(dev, q, new_admin, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		err = taprio_disable_offload(dev, q, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		goto free_sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	/* Protects against enqueue()/dequeue() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	spin_lock_bh(qdisc_lock(sch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed to get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

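	/* Hand over the new schedule.  It is installed as the "admin"
	 * (pending) schedule and promoted to "oper" (active) once its
	 * start time is reached; in txtime-assist mode, if nothing is
	 * running yet, it can become oper directly.
	 */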
	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}
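
/* An illustrative example of the userspace syntax this qdisc accepts
 * (device name and values are arbitrary): a three-entry, purely
 * software schedule.  Each "S" entry opens the given gate mask for the
 * given interval in nanoseconds:
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *       queues 1@0 1@1 2@2 \
 *       base-time 1528743495910289987 \
 *       sched-entry S 01 300000 \
 *       sched-entry S 02 300000 \
 *       sched-entry S 04 400000 \
 *       clockid CLOCK_TAI
 */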

static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);
	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			if (q->qdiscs[i])
				qdisc_reset(q->qdiscs[i]);
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

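/* Tear down everything taprio_init()/taprio_change() set up.  The
 * schedules are freed through RCU callbacks so that concurrent readers
 * of oper_sched/admin_sched can finish first.
 */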
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	/* Note that taprio_reset() might not be called if an error
	 * happens in qdisc_create(), after taprio_init() has been called.
	 */
	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

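/* One-time setup: taprio must be the root qdisc of a multiqueue device.
 * A pfifo child qdisc is pre-allocated for every TX queue; the actual
 * schedule is parsed later, in taprio_change().
 */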
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one from taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs; attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

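/* Class handles are 1-based: class 'cl' corresponds to TX queue 'cl - 1'. */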
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

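/* Replace the child qdisc of one TX queue.  The device is quiesced with
 * dev_deactivate() while the pointer is swapped, so the old child cannot
 * be dequeued from concurrently.
 */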
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

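/* Netlink dump helpers: each schedule entry becomes a
 * TCA_TAPRIO_SCHED_ENTRY nest carrying index, command, gate mask and
 * interval.
 */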
static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

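/* Dump the full qdisc configuration: the mqprio-style priority map,
 * clockid and flags, the operational schedule at the top level and the
 * pending admin schedule (if any) in its own nest.
 */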
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

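/* Walk all classes (one per TX queue), honouring the walker's
 * skip/count bookkeeping.
 */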
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.reset		= taprio_reset,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

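/* The netdevice notifier is intended to keep the per-byte
 * transmission-time estimate in sync with link speed changes (see
 * taprio_dev_notifier() and taprio_set_picos_per_byte() earlier in this
 * file).
 */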
static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");