// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
 *                    David Jander
 * Copyright (C) 2014-2017 Pengutronix,
 *                         Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

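/* Mailboxes may be scanned in increasing or decreasing order, depending
 * on how the hardware numbers them (struct can_rx_offload::inc). These
 * two helpers hide the direction: can_rx_offload_le() is the loop
 * condition, can_rx_offload_inc() the loop step.
 */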
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

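/* Insert @new into @head, keeping the queue sorted by the timestamp
 * stored in the skb's control block. The queue is walked from the tail,
 * as newly read frames usually carry the newest timestamps.
 */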
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: index of the mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	struct can_rx_offload_cb *cb;
	bool drop = false;
	u32 timestamp;

	/* If queue is full drop frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate
	 * error value.
	 */
	if (unlikely(IS_ERR(skb))) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}

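/* can_rx_offload_irq_offload_timestamp() - offload all pending mailboxes
 *
 * Called from the driver's interrupt handler with a bitmask of pending
 * mailboxes. Each pending mailbox is read, the resulting skbs are sorted
 * by timestamp and spliced onto the offload queue, and NAPI is scheduled
 * if anything was queued. Returns the number of frames queued.
 *
 * Illustrative sketch only; the driver type, IRQ source and the
 * my_can_read_pending_mailboxes() helper are hypothetical and not part
 * of this file:
 *
 *	static irqreturn_t my_can_irq(int irq, void *dev_id)
 *	{
 *		struct my_can_priv *priv = dev_id;
 *		u64 pending = my_can_read_pending_mailboxes(priv);
 *
 *		if (pending)
 *			can_rx_offload_irq_offload_timestamp(&priv->offload,
 *							     pending);
 *		return IRQ_HANDLED;
 *	}
 */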
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		queue_len = skb_queue_len(&offload->skb_queue);
		if (queue_len > offload->skb_queue_len_max / 8)
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

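/* can_rx_offload_irq_offload_fifo() - drain a hardware RX FIFO
 *
 * Repeatedly reads mailbox 0 until it reports empty, queueing each frame
 * in arrival order, and schedules NAPI if anything was received. Read
 * errors are already accounted in can_rx_offload_offload_one() and are
 * simply skipped here.
 */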
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

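/* can_rx_offload_get_echo_skb() - complete a transmitted frame
 *
 * Fetches the echo skb for TX slot @idx and feeds it into the sorted
 * offload queue using the hardware TX timestamp, so looped-back TX
 * frames are delivered in timestamp order together with RX frames.
 * Returns the frame's data length for byte accounting, or 0 if there
 * was no echo skb.
 */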
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

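/* Three setup flavours are exported: can_rx_offload_add_timestamp() for
 * controllers with a range of timestamped mailboxes (mb_first/mb_last
 * and mailbox_read must be set), can_rx_offload_add_fifo() for a single
 * hardware RX FIFO (only mailbox_read must be set), and
 * can_rx_offload_add_manual() for drivers that queue skbs themselves via
 * can_rx_offload_queue_*() and therefore must not set mailbox_read.
 */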
int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

int can_rx_offload_add_manual(struct net_device *dev,
			      struct can_rx_offload *offload,
			      unsigned int weight)
{
	if (offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);