// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ASIX AX8817X based USB 2.0 Ethernet Devices
 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
 * Copyright (c) 2002-2003 TiVo Inc.
 */

#include "asix.h"

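/* Register access helpers: the in_pm flag selects the usbnet *_nopm
 * variants, which skip the usual runtime-PM handling and are meant to
 * be used from the suspend/resume paths.
 */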
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
		  u16 size, void *data, int in_pm)
{
	int ret;
	int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);

	BUG_ON(!dev);

	if (!in_pm)
		fn = usbnet_read_cmd;
	else
		fn = usbnet_read_cmd_nopm;

	ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		 value, index, data, size);

	if (unlikely(ret < 0))
		netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
			    index, ret);

	return ret;
}

int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
		   u16 size, void *data, int in_pm)
{
	int ret;
	int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);

	BUG_ON(!dev);

	if (!in_pm)
		fn = usbnet_write_cmd;
	else
		fn = usbnet_write_cmd_nopm;

	ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		 value, index, data, size);

	if (unlikely(ret < 0))
		netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
			    index, ret);

	return ret;
}

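/* Fire-and-forget register write. The asynchronous variant is needed
 * from contexts that must not sleep, e.g. asix_set_multicast() below,
 * which runs with the netdev address lock held; the completion status
 * is not reported back to the caller.
 */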
void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
			  u16 size, void *data)
{
	usbnet_write_cmd_async(dev, cmd,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       value, index, data, size);
}

static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
{
	/* Reset the variables that have a lifetime outside of
	 * asix_rx_fixup_internal() so that future processing starts from a
	 * known set of initial conditions.
	 */

	if (rx->ax_skb) {
		/* Discard any incomplete Ethernet frame in the netdev buffer */
		kfree_skb(rx->ax_skb);
		rx->ax_skb = NULL;
	}

	/* Assume the Data header 32-bit word is at the start of the current
	 * or next URB socket buffer so reset all the state variables.
	 */
	rx->remaining = 0;
	rx->split_head = false;
	rx->header = 0;
}

int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
			   struct asix_rx_fixup_info *rx)
{
	int offset = 0;
	u16 size;

	/* When an Ethernet frame spans multiple URB socket buffers,
	 * sanity-check the Data header synchronisation: try to detect
	 * whether the previous socket buffer was truncated or a socket
	 * buffer went missing altogether. Such a discontinuity in the
	 * data stream must neither cause bad data to be appended to the
	 * current netdev socket buffer nor cause a good netdev socket
	 * buffer to be discarded unnecessarily.
	 */
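	/* Each frame inside the URB is preceded by a 32-bit Data header:
	 * the low 11 bits hold the frame length and bits 16-26 hold the
	 * bitwise complement of that length, which is what the
	 * consistency checks below rely on.
	 */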
	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
		offset = ((rx->remaining + 1) & 0xfffe);
		rx->header = get_unaligned_le32(skb->data + offset);
		offset = 0;

		size = (u16)(rx->header & 0x7ff);
		if (size != ((~rx->header >> 16) & 0x7ff)) {
			netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
				   rx->remaining);
			reset_asix_rx_fixup_info(rx);
		}
	}

	while (offset + sizeof(u16) <= skb->len) {
		u16 copy_length;

		if (!rx->remaining) {
			if (skb->len - offset == sizeof(u16)) {
				rx->header = get_unaligned_le16(
						skb->data + offset);
				rx->split_head = true;
				offset += sizeof(u16);
				break;
			}

			if (rx->split_head) {
				rx->header |= (get_unaligned_le16(
						skb->data + offset) << 16);
				rx->split_head = false;
				offset += sizeof(u16);
			} else {
				rx->header = get_unaligned_le32(skb->data +
								offset);
				offset += sizeof(u32);
			}

			/* take frame length from Data header 32-bit word */
			size = (u16)(rx->header & 0x7ff);
			if (size != ((~rx->header >> 16) & 0x7ff)) {
				netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
					   rx->header, offset);
				reset_asix_rx_fixup_info(rx);
				return 0;
			}
			if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
				netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
					   size);
				reset_asix_rx_fixup_info(rx);
				return 0;
			}

			/* Allocation of the netdev socket buffer may fail,
			 * but keep processing the URB socket buffer so that
			 * synchronisation with the Ethernet frame Data
			 * header words is maintained.
			 */
			rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, size);

			rx->remaining = size;
		}

		if (rx->remaining > skb->len - offset) {
			copy_length = skb->len - offset;
			rx->remaining -= copy_length;
		} else {
			copy_length = rx->remaining;
			rx->remaining = 0;
		}

		if (rx->ax_skb) {
			skb_put_data(rx->ax_skb, skb->data + offset,
				     copy_length);
			if (!rx->remaining) {
				usbnet_skb_return(dev, rx->ax_skb);
				rx->ax_skb = NULL;
			}
		}

		offset += (copy_length + 1) & 0xfffe;
	}

	if (skb->len != offset) {
		netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
			   skb->len, offset);
		reset_asix_rx_fixup_info(rx);
		return 0;
	}

	return 1;
}

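/* The per-device rx fixup state lives in driver_priv so that an
 * Ethernet frame split across several URBs can be reassembled by
 * consecutive calls.
 */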
int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
{
	struct asix_common_private *dp = dev->driver_priv;
	struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;

	return asix_rx_fixup_internal(dev, skb, rx);
}

void asix_rx_fixup_common_free(struct asix_common_private *dp)
{
	struct asix_rx_fixup_info *rx;

	if (!dp)
		return;

	rx = &dp->rx_fixup_info;

	if (rx->ax_skb) {
		kfree_skb(rx->ax_skb);
		rx->ax_skb = NULL;
	}
}

struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
			      gfp_t flags)
{
	int padlen;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	u32 packet_len;
	u32 padbytes = 0xffff0000;
	void *ptr;

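	/* Append a dummy zero-length header (padbytes = 0xffff0000, i.e.
	 * length 0 and its complement) whenever the frame plus its 4-byte
	 * length header would be an exact multiple of the bulk endpoint
	 * size, so the transfer never ends exactly on a packet boundary.
	 */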
	padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;

	/* We need to push 4 bytes in front of the frame (packet_len)
	 * and maybe add 4 bytes after the end (if padlen is 4).
	 *
	 * Avoid the expensive skb_copy_expand() call by using the
	 * following rules:
	 * - We are allowed to push 4 bytes in headroom if skb_header_cloned()
	 *   is false (and if we have 4 bytes of headroom)
	 * - We are allowed to put 4 bytes at tail if skb_cloned()
	 *   is false (and if we have 4 bytes of tailroom)
	 *
	 * TCP packets for example are cloned, but __skb_header_release()
	 * was called in the TCP stack, allowing us to use headroom for our
	 * needs.
	 */
	if (!skb_header_cloned(skb) &&
	    !(padlen && skb_cloned(skb)) &&
	    headroom + tailroom >= 4 + padlen) {
		/* following should not happen, but better be safe */
		if (headroom < 4 ||
		    tailroom < padlen) {
			skb->data = memmove(skb->head + 4, skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, 4, padlen, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

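	/* The prepended 4-byte header carries the frame length in its low
	 * 16 bits and the bitwise complement of the length in its high
	 * 16 bits.
	 */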
	packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
	ptr = skb_push(skb, 4);
	put_unaligned_le32(packet_len, ptr);

	if (padlen) {
		put_unaligned_le32(padbytes, skb_tail_pointer(skb));
		skb_put(skb, sizeof(padbytes));
	}

	usbnet_set_skb_tx_stats(skb, 1, 0);
	return skb;
}

int asix_set_sw_mii(struct usbnet *dev, int in_pm)
{
	int ret;

	ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL, in_pm);
	if (ret < 0)
		netdev_err(dev->net, "Failed to enable software MII access\n");
	return ret;
}

int asix_set_hw_mii(struct usbnet *dev, int in_pm)
{
	int ret;

	ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL, in_pm);
	if (ret < 0)
		netdev_err(dev->net, "Failed to enable hardware MII access\n");
	return ret;
}

int asix_read_phy_addr(struct usbnet *dev, int internal)
{
	int offset = (internal ? 1 : 0);
	u8 buf[2];
	int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);

	netdev_dbg(dev->net, "asix_get_phy_addr()\n");

	if (ret < 2) {
		netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
		goto out;
	}
	netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
		   *((__le16 *)buf));
	ret = buf[offset];

out:
	return ret;
}

int asix_get_phy_addr(struct usbnet *dev)
{
	/* return the address of the internal phy */
	return asix_read_phy_addr(dev, 1);
}

int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm)
{
	int ret;

	ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL, in_pm);
	if (ret < 0)
		netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);

	return ret;
}

u16 asix_read_rx_ctl(struct usbnet *dev, int in_pm)
{
	__le16 v;
	int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v, in_pm);

	if (ret < 0) {
		netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
		goto out;
	}
	ret = le16_to_cpu(v);
out:
	return ret;
}

int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm)
{
	int ret;

	netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL, in_pm);
	if (ret < 0)
		netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
			   mode, ret);

	return ret;
}

u16 asix_read_medium_status(struct usbnet *dev, int in_pm)
{
	__le16 v;
	int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS,
				0, 0, 2, &v, in_pm);

	if (ret < 0) {
		netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
			   ret);
		return ret;	/* TODO: callers not checking for error ret */
	}

	return le16_to_cpu(v);
}

int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
{
	int ret;

	netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE,
			     mode, 0, 0, NULL, in_pm);
	if (ret < 0)
		netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
			   mode, ret);

	return ret;
}

int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
	int ret;

	netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL, in_pm);
	if (ret < 0)
		netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
			   value, ret);

	if (sleep)
		msleep(sleep);

	return ret;
}

/*
 * AX88772 & AX88178 have a 16-bit RX_CTL value
 */
void asix_set_multicast(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);
	struct asix_data *data = (struct asix_data *)&dev->data;
	u16 rx_ctl = AX_DEFAULT_RX_CTL;

	if (net->flags & IFF_PROMISC) {
		rx_ctl |= AX_RX_CTL_PRO;
	} else if (net->flags & IFF_ALLMULTI ||
		   netdev_mc_count(net) > AX_MAX_MCAST) {
		rx_ctl |= AX_RX_CTL_AMALL;
	} else if (netdev_mc_empty(net)) {
		/* just broadcast and directed */
	} else {
		/* We use the 20 byte dev->data for our 8 byte filter
		 * buffer to avoid allocating memory that is tricky to
		 * free later.
		 */
		struct netdev_hw_addr *ha;
		u32 crc_bits;

		memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);

		/* Build the multicast hash filter. */
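		/* The upper six bits of the Ethernet CRC of each address
		 * select one of the 64 bits in the 8-byte hardware filter.
		 */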
		netdev_for_each_mc_addr(ha, net) {
			crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
			data->multi_filter[crc_bits >> 3] |=
				1 << (crc_bits & 7);
		}

		asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
				     AX_MCAST_FILTER_SIZE, data->multi_filter);

		rx_ctl |= AX_RX_CTL_AM;
	}

	asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}

int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
	struct usbnet *dev = netdev_priv(netdev);
	__le16 res;
	u8 smsr = 0;	/* avoid testing an uninitialised value if the status read fails */
	int i = 0;
	int ret;

	mutex_lock(&dev->phy_mutex);
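	/* Switch the PHY to software (host) MII access and poll the status
	 * register until the host owns the MII bus, giving up after about
	 * 30 attempts or once the device has gone away.
	 */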
	do {
		ret = asix_set_sw_mii(dev, 0);
		if (ret == -ENODEV || ret == -ETIMEDOUT)
			break;
		usleep_range(1000, 1100);
		ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
				    0, 0, 1, &smsr, 0);
	} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
	if (ret == -ENODEV || ret == -ETIMEDOUT) {
		mutex_unlock(&dev->phy_mutex);
		return ret;
	}

	asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
		      (__u16)loc, 2, &res, 0);
	asix_set_hw_mii(dev, 0);
	mutex_unlock(&dev->phy_mutex);

	netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
		   phy_id, loc, le16_to_cpu(res));

	return le16_to_cpu(res);
}

void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
{
	struct usbnet *dev = netdev_priv(netdev);
	__le16 res = cpu_to_le16(val);
	u8 smsr = 0;	/* avoid testing an uninitialised value if the status read fails */
	int i = 0;
	int ret;

	netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
		   phy_id, loc, val);

	mutex_lock(&dev->phy_mutex);
	do {
		ret = asix_set_sw_mii(dev, 0);
		if (ret == -ENODEV)
			break;
		usleep_range(1000, 1100);
		ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
				    0, 0, 1, &smsr, 0);
	} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
	if (ret == -ENODEV) {
		mutex_unlock(&dev->phy_mutex);
		return;
	}

	asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
		       (__u16)loc, 2, &res, 0);
	asix_set_hw_mii(dev, 0);
	mutex_unlock(&dev->phy_mutex);
}

int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
{
	struct usbnet *dev = netdev_priv(netdev);
	__le16 res;
	u8 smsr = 0;	/* avoid testing an uninitialised value if the status read fails */
	int i = 0;
	int ret;

	mutex_lock(&dev->phy_mutex);
	do {
		ret = asix_set_sw_mii(dev, 1);
		if (ret == -ENODEV || ret == -ETIMEDOUT)
			break;
		usleep_range(1000, 1100);
		ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
				    0, 0, 1, &smsr, 1);
	} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
	if (ret == -ENODEV || ret == -ETIMEDOUT) {
		mutex_unlock(&dev->phy_mutex);
		return ret;
	}

	asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
		      (__u16)loc, 2, &res, 1);
	asix_set_hw_mii(dev, 1);
	mutex_unlock(&dev->phy_mutex);

	netdev_dbg(dev->net, "asix_mdio_read_nopm() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
		   phy_id, loc, le16_to_cpu(res));

	return le16_to_cpu(res);
}

void
asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc, int val)
{
	struct usbnet *dev = netdev_priv(netdev);
	__le16 res = cpu_to_le16(val);
	u8 smsr = 0;	/* avoid testing an uninitialised value if the status read fails */
	int i = 0;
	int ret;

	netdev_dbg(dev->net, "asix_mdio_write_nopm() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
		   phy_id, loc, val);

	mutex_lock(&dev->phy_mutex);
	do {
		ret = asix_set_sw_mii(dev, 1);
		if (ret == -ENODEV)
			break;
		usleep_range(1000, 1100);
		ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
				    0, 0, 1, &smsr, 1);
	} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
	if (ret == -ENODEV) {
		mutex_unlock(&dev->phy_mutex);
		return;
	}

	asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
		       (__u16)loc, 2, &res, 1);
	asix_set_hw_mii(dev, 1);
	mutex_unlock(&dev->phy_mutex);
}

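/* Wake-on-LAN: the AX_MONITOR_LINK and AX_MONITOR_MAGIC bits of the
 * Monitor Mode register correspond to ethtool's WAKE_PHY and WAKE_MAGIC
 * options respectively.
 */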
void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
	struct usbnet *dev = netdev_priv(net);
	u8 opt;

	if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE,
			  0, 0, 1, &opt, 0) < 0) {
		wolinfo->supported = 0;
		wolinfo->wolopts = 0;
		return;
	}
	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	wolinfo->wolopts = 0;
	if (opt & AX_MONITOR_LINK)
		wolinfo->wolopts |= WAKE_PHY;
	if (opt & AX_MONITOR_MAGIC)
		wolinfo->wolopts |= WAKE_MAGIC;
}

int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
	struct usbnet *dev = netdev_priv(net);
	u8 opt = 0;

	if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
		return -EINVAL;

	if (wolinfo->wolopts & WAKE_PHY)
		opt |= AX_MONITOR_LINK;
	if (wolinfo->wolopts & WAKE_MAGIC)
		opt |= AX_MONITOR_MAGIC;

	if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
			   opt, 0, 0, NULL, 0) < 0)
		return -EINVAL;

	return 0;
}

int asix_get_eeprom_len(struct net_device *net)
{
	return AX_EEPROM_LEN;
}

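/* The EEPROM is accessed 16 bits at a time; the helpers below therefore
 * operate on word ranges and read the words that straddle the requested
 * byte range when the offset or length is not word-aligned.
 */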
int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
		    u8 *data)
{
	struct usbnet *dev = netdev_priv(net);
	u16 *eeprom_buff;
	int first_word, last_word;
	int i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = AX_EEPROM_MAGIC;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
				    GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	/* ax8817x returns 2 bytes from eeprom on read */
	for (i = first_word; i <= last_word; i++) {
		if (asix_read_cmd(dev, AX_CMD_READ_EEPROM, i, 0, 2,
				  &eeprom_buff[i - first_word], 0) < 0) {
			kfree(eeprom_buff);
			return -EIO;
		}
	}

	memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);
	return 0;
}

int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
		    u8 *data)
{
	struct usbnet *dev = netdev_priv(net);
	u16 *eeprom_buff;
	int first_word, last_word;
	int i;
	int ret;

	netdev_dbg(net, "write EEPROM len %d, offset %d, magic 0x%x\n",
		   eeprom->len, eeprom->offset, eeprom->magic);

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != AX_EEPROM_MAGIC)
		return -EINVAL;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
				    GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	/* align data to 16 bit boundaries, read the missing data from
	 * the EEPROM
	 */
	if (eeprom->offset & 1) {
		ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, first_word, 0, 2,
				    &eeprom_buff[0], 0);
		if (ret < 0) {
			netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
			goto free;
		}
	}

	if ((eeprom->offset + eeprom->len) & 1) {
		ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, last_word, 0, 2,
				    &eeprom_buff[last_word - first_word], 0);
		if (ret < 0) {
			netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
			goto free;
		}
	}

	memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);

	/* write data to EEPROM */
	ret = asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0x0000, 0, 0, NULL, 0);
	if (ret < 0) {
		netdev_err(net, "Failed to enable EEPROM write\n");
		goto free;
	}
	msleep(20);

	for (i = first_word; i <= last_word; i++) {
		netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
			   i, eeprom_buff[i - first_word]);
		ret = asix_write_cmd(dev, AX_CMD_WRITE_EEPROM, i,
				     eeprom_buff[i - first_word], 0, NULL, 0);
		if (ret < 0) {
			netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n",
				   i);
			goto free;
		}
		msleep(20);
	}

	ret = asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0x0000, 0, 0, NULL, 0);
	if (ret < 0) {
		netdev_err(net, "Failed to disable EEPROM write\n");
		goto free;
	}

	ret = 0;
free:
	kfree(eeprom_buff);
	return ret;
}

void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	/* Inherit standard device info */
	usbnet_get_drvinfo(net, info);
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}

int asix_set_mac_address(struct net_device *net, void *p)
{
	struct usbnet *dev = netdev_priv(net);
	struct asix_data *data = (struct asix_data *)&dev->data;
	struct sockaddr *addr = p;

	if (netif_running(net))
		return -EBUSY;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);

	/* We use the 20 byte dev->data for our 6 byte mac buffer to avoid
	 * allocating memory that is tricky to free later.
	 */
	memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
	asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
			     data->mac_addr);

	return 0;
}