// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */
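/* "gate" TC action: packets are passed or dropped according to a cyclic
 * schedule of gate open/close intervals, in the style of IEEE 802.1Qci
 * per-stream filtering and policing stream gates.
 */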

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>

static unsigned int gate_net_id;
static struct tc_action_ops act_gate_ops;

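/* Return the current time on the clock base selected for this action.
 * TK_OFFS_MAX is used as a sentinel for CLOCK_MONOTONIC, in which case the
 * monotonic timestamp is returned without conversion.
 */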
static ktime_t gate_get_time(struct tcf_gate *gact)
{
        ktime_t mono = ktime_get();

        switch (gact->tk_offset) {
        case TK_OFFS_MAX:
                return mono;
        default:
                return ktime_mono_to_any(mono, gact->tk_offset);
        }

        return KTIME_MAX;
}

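/* Compute the first cycle start that is not in the past: the configured base
 * time if it is still ahead of us, otherwise base time plus a whole number of
 * cycle times.
 */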
static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
        struct tcf_gate_params *param = &gact->param;
        ktime_t now, base, cycle;
        u64 n;

        base = ns_to_ktime(param->tcfg_basetime);
        now = gate_get_time(gact);

        if (ktime_after(base, now)) {
                *start = base;
                return;
        }

        cycle = param->tcfg_cycletime;

        n = div64_u64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
}

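/* Arm the schedule timer, never pushing back an expiry that is already set
 * earlier than the requested start.
 */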
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
        ktime_t expires;

        expires = hrtimer_get_expires(&gact->hitimer);
        if (expires == 0)
                expires = KTIME_MAX;

        start = min_t(ktime_t, start, expires);

        hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

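/* Schedule timer callback: apply the pending entry (gate open/closed state and
 * per-entry octet budget), compute its close time, pick the next entry in the
 * list and re-arm the timer for that close time.  If we have fallen behind,
 * the close time is resynchronized to the next cycle boundary.
 */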
static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
        struct tcf_gate *gact = container_of(timer, struct tcf_gate,
                                             hitimer);
        struct tcf_gate_params *p = &gact->param;
        struct tcfg_gate_entry *next;
        ktime_t close_time, now;

        spin_lock(&gact->tcf_lock);

        next = gact->next_entry;

        /* cycle start, clear pending bit, clear total octets */
        gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
        gact->current_entry_octets = 0;
        gact->current_max_octets = next->maxoctets;

        gact->current_close_time = ktime_add_ns(gact->current_close_time,
                                                next->interval);

        close_time = gact->current_close_time;

        if (list_is_last(&next->list, &p->entries))
                next = list_first_entry(&p->entries,
                                        struct tcfg_gate_entry, list);
        else
                next = list_next_entry(next, list);

        now = gate_get_time(gact);

        if (ktime_after(now, close_time)) {
                ktime_t cycle, base;
                u64 n;

                cycle = p->tcfg_cycletime;
                base = ns_to_ktime(p->tcfg_basetime);
                n = div64_u64(ktime_sub_ns(now, base), cycle);
                close_time = ktime_add_ns(base, (n + 1) * cycle);
        }

        gact->next_entry = next;

        hrtimer_set_expires(&gact->hitimer, close_time);

        spin_unlock(&gact->tcf_lock);

        return HRTIMER_RESTART;
}

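/* Packet path: pass the packet while the gate is open and the per-entry octet
 * budget (if any) has not been exceeded, otherwise count the drop and return
 * TC_ACT_SHOT.  While the schedule is still pending (before the first timer
 * expiry) packets are passed unconditionally.
 */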
static int tcf_gate_act(struct sk_buff *skb, const struct tc_action *a,
                        struct tcf_result *res)
{
        struct tcf_gate *gact = to_gate(a);

        spin_lock(&gact->tcf_lock);

        tcf_lastuse_update(&gact->tcf_tm);
        bstats_update(&gact->tcf_bstats, skb);

        if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
                spin_unlock(&gact->tcf_lock);
                return gact->tcf_action;
        }

        if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN))
                goto drop;

        if (gact->current_max_octets >= 0) {
                gact->current_entry_octets += qdisc_pkt_len(skb);
                if (gact->current_entry_octets > gact->current_max_octets) {
                        gact->tcf_qstats.overlimits++;
                        goto drop;
                }
        }

        spin_unlock(&gact->tcf_lock);

        return gact->tcf_action;
drop:
        gact->tcf_qstats.drops++;
        spin_unlock(&gact->tcf_lock);

        return TC_ACT_SHOT;
}

static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
        [TCA_GATE_ENTRY_INDEX] = { .type = NLA_U32 },
        [TCA_GATE_ENTRY_GATE] = { .type = NLA_FLAG },
        [TCA_GATE_ENTRY_INTERVAL] = { .type = NLA_U32 },
        [TCA_GATE_ENTRY_IPV] = { .type = NLA_S32 },
        [TCA_GATE_ENTRY_MAX_OCTETS] = { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
        [TCA_GATE_PARMS] =
                NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
        [TCA_GATE_PRIORITY] = { .type = NLA_S32 },
        [TCA_GATE_ENTRY_LIST] = { .type = NLA_NESTED },
        [TCA_GATE_BASE_TIME] = { .type = NLA_U64 },
        [TCA_GATE_CYCLE_TIME] = { .type = NLA_U64 },
        [TCA_GATE_CYCLE_TIME_EXT] = { .type = NLA_U64 },
        [TCA_GATE_FLAGS] = { .type = NLA_U32 },
        [TCA_GATE_CLOCKID] = { .type = NLA_S32 },
};

static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
                           struct netlink_ext_ack *extack)
{
        u32 interval = 0;

        entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

        if (tb[TCA_GATE_ENTRY_INTERVAL])
                interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

        if (interval == 0) {
                NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
                return -EINVAL;
        }

        entry->interval = interval;

        if (tb[TCA_GATE_ENTRY_IPV])
                entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
        else
                entry->ipv = -1;

        if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
                entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
        else
                entry->maxoctets = -1;

        return 0;
}

static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
                            int index, struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
        int err;

        err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
        if (err < 0) {
                NL_SET_ERR_MSG(extack, "Could not parse nested entry");
                return -EINVAL;
        }

        entry->index = index;

        return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
        struct tcfg_gate_entry *entry, *e;

        list_for_each_entry_safe(entry, e, entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}

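/* Parse the nested TCA_GATE_ENTRY_LIST attribute into a list of
 * tcfg_gate_entry structures.  Returns the number of entries parsed; on error
 * the partially built list is released.
 */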
static int parse_gate_list(struct nlattr *list_attr,
                           struct tcf_gate_params *sched,
                           struct netlink_ext_ack *extack)
{
        struct tcfg_gate_entry *entry;
        struct nlattr *n;
        int err, rem;
        int i = 0;

        if (!list_attr)
                return -EINVAL;

        nla_for_each_nested(n, list_attr, rem) {
                if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
                        NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
                        continue;
                }

                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
                if (!entry) {
                        NL_SET_ERR_MSG(extack, "Not enough memory for entry");
                        err = -ENOMEM;
                        goto release_list;
                }

                err = parse_gate_entry(n, entry, i, extack);
                if (err < 0) {
                        kfree(entry);
                        goto release_list;
                }

                list_add_tail(&entry->list, &sched->entries);
                i++;
        }

        sched->num_entries = i;

        return i;

release_list:
        release_entry_list(&sched->entries);

        return err;
}

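/* Initialize the schedule hrtimer, or re-initialize it when base time, clock
 * id or timekeeping offset changed.  tcf_lock is dropped around
 * hrtimer_cancel() because the timer callback takes the same lock.
 */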
static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
                             enum tk_offsets tko, s32 clockid,
                             bool do_init)
{
        if (!do_init) {
                if (basetime == gact->param.tcfg_basetime &&
                    tko == gact->tk_offset &&
                    clockid == gact->param.tcfg_clockid)
                        return;

                spin_unlock_bh(&gact->tcf_lock);
                hrtimer_cancel(&gact->hitimer);
                spin_lock_bh(&gact->tcf_lock);
        }
        gact->param.tcfg_basetime = basetime;
        gact->param.tcfg_clockid = clockid;
        gact->tk_offset = tko;
        hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
        gact->hitimer.function = gate_timer_func;
}

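/* Netlink ->init() handler: create or update a gate action from the
 * TCA_GATE_* attributes, build the schedule entry list, derive the cycle time
 * from the entries when none was given, and start the timer at the computed
 * cycle start.
 */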
static int tcf_gate_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
                         struct tcf_proto *tp, u32 flags,
                         struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, gate_net_id);
        enum tk_offsets tk_offset = TK_OFFS_TAI;
        struct nlattr *tb[TCA_GATE_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        u64 cycletime = 0, basetime = 0;
        struct tcf_gate_params *p;
        s32 clockid = CLOCK_TAI;
        struct tcf_gate *gact;
        struct tc_gate *parm;
        int ret = 0, err;
        u32 gflags = 0;
        s32 prio = -1;
        ktime_t start;
        u32 index;

        if (!nla)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_GATE_PARMS])
                return -EINVAL;

        if (tb[TCA_GATE_CLOCKID]) {
                clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
                switch (clockid) {
                case CLOCK_REALTIME:
                        tk_offset = TK_OFFS_REAL;
                        break;
                case CLOCK_MONOTONIC:
                        tk_offset = TK_OFFS_MAX;
                        break;
                case CLOCK_BOOTTIME:
                        tk_offset = TK_OFFS_BOOT;
                        break;
                case CLOCK_TAI:
                        tk_offset = TK_OFFS_TAI;
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
                        return -EINVAL;
                }
        }

        parm = nla_data(tb[TCA_GATE_PARMS]);
        index = parm->index;

        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;

        if (err && bind)
                return 0;

        if (!err) {
                ret = tcf_idr_create(tn, index, est, a,
                                     &act_gate_ops, bind, false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }

                ret = ACT_P_CREATED;
        } else if (!ovr) {
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }

        if (tb[TCA_GATE_PRIORITY])
                prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

        if (tb[TCA_GATE_BASE_TIME])
                basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

        if (tb[TCA_GATE_FLAGS])
                gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

        gact = to_gate(*a);
        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&gact->param.entries);

        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        spin_lock_bh(&gact->tcf_lock);
        p = &gact->param;

        if (tb[TCA_GATE_CYCLE_TIME])
                cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

        if (tb[TCA_GATE_ENTRY_LIST]) {
                err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
                if (err < 0)
                        goto chain_put;
        }

        if (!cycletime) {
                struct tcfg_gate_entry *entry;
                ktime_t cycle = 0;

                list_for_each_entry(entry, &p->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);
                cycletime = cycle;
                if (!cycletime) {
                        err = -EINVAL;
                        goto chain_put;
                }
        }
        p->tcfg_cycletime = cycletime;

        if (tb[TCA_GATE_CYCLE_TIME_EXT])
                p->tcfg_cycletime_ext =
                        nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

        gate_setup_timer(gact, basetime, tk_offset, clockid,
                         ret == ACT_P_CREATED);
        p->tcfg_priority = prio;
        p->tcfg_flags = gflags;
        gate_get_start_time(gact, &start);

        gact->current_close_time = start;
        gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

        gact->next_entry = list_first_entry(&p->entries,
                                            struct tcfg_gate_entry, list);

        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

        gate_start_timer(gact, start);

        spin_unlock_bh(&gact->tcf_lock);

        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        return ret;

chain_put:
        spin_unlock_bh(&gact->tcf_lock);

        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        /* action is not inserted in any list: it's safe to init hitimer
         * without taking tcf_lock.
         */
        if (ret == ACT_P_CREATED)
                gate_setup_timer(gact, gact->param.tcfg_basetime,
                                 gact->tk_offset, gact->param.tcfg_clockid,
                                 true);
        tcf_idr_release(*a, bind);
        return err;
}

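/* Cancel the schedule timer and free the entry list when the action instance
 * is destroyed.
 */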
static void tcf_gate_cleanup(struct tc_action *a)
{
        struct tcf_gate *gact = to_gate(a);
        struct tcf_gate_params *p;

        p = &gact->param;
        hrtimer_cancel(&gact->hitimer);
        release_entry_list(&p->entries);
}

static int dumping_entry(struct sk_buff *skb,
                         struct tcfg_gate_entry *entry)
{
        struct nlattr *item;

        item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
        if (!item)
                return -ENOSPC;

        if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
                goto nla_put_failure;

        if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
                goto nla_put_failure;

        if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
                goto nla_put_failure;

        if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
                goto nla_put_failure;

        return nla_nest_end(skb, item);

nla_put_failure:
        nla_nest_cancel(skb, item);
        return -1;
}

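/* Netlink ->dump() handler: emit the gate parameters and the full schedule
 * entry list under tcf_lock.
 */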
static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
                         int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_gate *gact = to_gate(a);
        struct tc_gate opt = {
                .index = gact->tcf_index,
                .refcnt = refcount_read(&gact->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
        };
        struct tcfg_gate_entry *entry;
        struct tcf_gate_params *p;
        struct nlattr *entry_list;
        struct tcf_t t;

        spin_lock_bh(&gact->tcf_lock);
        opt.action = gact->tcf_action;

        p = &gact->param;

        if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
                              p->tcfg_basetime, TCA_GATE_PAD))
                goto nla_put_failure;

        if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
                              p->tcfg_cycletime, TCA_GATE_PAD))
                goto nla_put_failure;

        if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
                              p->tcfg_cycletime_ext, TCA_GATE_PAD))
                goto nla_put_failure;

        if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
                goto nla_put_failure;

        if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
                goto nla_put_failure;

        entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
        if (!entry_list)
                goto nla_put_failure;

        list_for_each_entry(entry, &p->entries, list) {
                if (dumping_entry(skb, entry) < 0)
                        goto nla_put_failure;
        }

        nla_nest_end(skb, entry_list);

        tcf_tm_dump(&t, &gact->tcf_tm);
        if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&gact->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&gact->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_gate_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
                           const struct tc_action_ops *ops,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, gate_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
                                  u64 drops, u64 lastuse, bool hw)
{
        struct tcf_gate *gact = to_gate(a);
        struct tcf_t *tm = &gact->tcf_tm;

        tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_gate_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, gate_net_id);

        return tcf_idr_search(tn, a, index);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_gate));
}

static struct tc_action_ops act_gate_ops = {
        .kind = "gate",
        .id = TCA_ID_GATE,
        .owner = THIS_MODULE,
        .act = tcf_gate_act,
        .dump = tcf_gate_dump,
        .init = tcf_gate_init,
        .cleanup = tcf_gate_cleanup,
        .walk = tcf_gate_walker,
        .stats_update = tcf_gate_stats_update,
        .get_fill_size = tcf_gate_get_fill_size,
        .lookup = tcf_gate_search,
        .size = sizeof(struct tcf_gate),
};

static __net_init int gate_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, gate_net_id);

        return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, gate_net_id);
}

static struct pernet_operations gate_net_ops = {
        .init = gate_init_net,
        .exit_batch = gate_exit_net,
        .id = &gate_net_id,
        .size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
        return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
        tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_LICENSE("GPL v2");