// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>

#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"

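/* Synchronous transmit worker, run from local->workqueue when the driver
 * does not provide an xmit_async() callback. On success the interface
 * stats are updated and the transmission is completed; on failure the
 * netif queues are woken again and the frame is dropped.
 */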
void ieee802154_xmit_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_wake_queue(&local->hw);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}

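/* Common transmit path for monitor and WPAN interfaces: append the FCS
 * unless the hardware computes it itself, stop the netif queues while the
 * frame is in flight, then hand the skb to the driver, preferring the
 * async callback and falling back to the sync worker above.
 */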
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
		struct sk_buff *nskb;
		u16 crc;

		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
					       GFP_ATOMIC);
			if (likely(nskb)) {
				consume_skb(skb);
				skb = nskb;
			} else {
				goto err_tx;
			}
		}

		crc = crc_ccitt(0, skb->data, skb->len);
		put_unaligned_le16(crc, skb_put(skb, 2));
	}

	/* Stop the netif queue on each sub_if_data object. */
	ieee802154_stop_queue(&local->hw);

	/* Prefer the async xmit callback; fall back to the sync worker otherwise. */
	if (local->ops->xmit_async) {
		unsigned int len = skb->len;

		ret = drv_xmit_async(local, skb);
		if (ret) {
			ieee802154_wake_queue(&local->hw);
			goto err_tx;
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	} else {
		local->tx_skb = skb;
		queue_work(local->workqueue, &local->tx_work);
	}

	return NETDEV_TX_OK;

err_tx:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

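/* ndo_start_xmit for monitor interfaces: frames are transmitted as-is,
 * without any link-layer security processing.
 */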
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	skb->skb_iif = dev->ifindex;

	return ieee802154_tx(sdata->local, skb);
}

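/* ndo_start_xmit for WPAN interfaces: apply link-layer security
 * (encryption) before handing the frame to the common transmit path.
 */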
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO: this should move into the wpan_dev_hard_header and
	 * dev_hard_header functions; otherwise wireshark shows a MAC header
	 * carrying security fields while the payload is not yet encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_tx(sdata->local, skb);
}