// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

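/**
 * h_reg_sub_crq - Register a sub-CRQ with the hypervisor
 * @unit_address: unit address of the VNIC device
 * @token: address of the queue page to register
 * @length: length of the queue in bytes
 * @number: returns the sub-CRQ number assigned by firmware
 * @irq: returns the interrupt source assigned to the sub-CRQ
 *
 * Thin wrapper around the H_REG_SUB_CRQ hcall that unpacks the queue
 * number and interrupt source from the hcall return buffer.
 */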
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}

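/**
 * alloc_long_term_buff - Allocate and map a long term buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: size of the buffer in bytes
 *
 * Allocate a DMA-coherent buffer, assign it the next free map id, and
 * ask the VNIC server to map it via send_request_map(), waiting up to
 * 10 seconds for the server's response. On any failure the buffer is
 * freed again before returning an error.
 */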
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		rc = -1;
		goto out;
	}
	rc = 0;
out:
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		ltb->buff = NULL;
	}
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

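/**
 * free_long_term_buff - Unmap and free a long term buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to release
 *
 * Ask the VNIC server to unmap the buffer unless the current reset
 * reason implies that the remote end has already done so, then free
 * the DMA-coherent memory and clear the descriptor.
 */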
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	ltb->buff = NULL;
	ltb->map_id = 0;
}

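/**
 * reset_long_term_buff - Re-register an existing long term buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to reset
 *
 * Zero the buffer and send a fresh map request for it, keeping the
 * existing map id. If the server rejects the request, fall back to
 * freeing and reallocating the buffer from scratch.
 */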
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

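/**
 * replenish_rx_pool - Refill an rx pool with receive buffers
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * For each free slot in the pool, allocate an skb, point the slot at
 * the matching region of the pool's long term buffer, and post an
 * rx_add sub-CRQ entry to hand the buffer to the VNIC server. On an
 * hcall failure the slot is returned to the free map; if the queue is
 * closed or a failover is pending, the rx pools are deactivated and
 * the carrier is reported off until the expected reset arrives.
 */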
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle, &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

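/**
 * replenish_pools - Refill every active rx pool
 * @adapter: private device data
 */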
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

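/**
 * init_stats_buffers - Allocate per-queue statistics buffers
 * @adapter: private device data
 *
 * Allocate zeroed tx and rx statistics arrays sized for the maximum
 * number of queues; release_stats_buffers() is the matching cleanup.
 */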
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

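/**
 * init_stats_token - DMA map the adapter statistics buffer
 * @adapter: private device data
 *
 * Map the driver's statistics structure for DMA from the device and
 * record the resulting address as the adapter's stats token.
 */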
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

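/**
 * reset_rx_pools - Reset all rx pools after an adapter reset
 * @adapter: private device data
 *
 * For each active rx pool, reallocate the long term buffer if the
 * required buffer size has changed, or re-register the existing one
 * otherwise, then rebuild the free map and reset the pool indices.
 */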
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = buff_size;
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

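/**
 * release_rx_pools - Free all rx pools and any buffers they hold
 * @adapter: private device data
 */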
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

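/**
 * init_rx_pools - Allocate and initialize the rx pools
 * @netdev: net device being set up
 *
 * Allocate one rx pool per active rx sub-CRQ, each with a free map,
 * an rx buffer array, and a long term buffer large enough to back
 * every entry at the current rx buffer size. On any allocation
 * failure, all pools allocated so far are released.
 */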
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = buff_size;
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

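/**
 * reset_one_tx_pool - Reset a single tx pool after an adapter reset
 * @adapter: private device data
 * @tx_pool: tx pool to reset
 *
 * Re-register the pool's long term buffer, clear the tx buffer
 * tracking array, and rebuild the free map and ring indices.
 */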
static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

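/**
 * reset_tx_pools - Reset every active tx and TSO pool
 * @adapter: private device data
 */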
static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

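/**
 * init_one_tx_pool - Allocate and initialize a single tx pool
 * @netdev: net device the pool belongs to
 * @tx_pool: tx pool to set up
 * @num_entries: number of buffers in the pool
 * @buf_size: size of each buffer in bytes
 *
 * Allocate the tx buffer tracking array, a long term buffer backing
 * all entries, and the free map. Partially initialized pools are left
 * for the caller to clean up via release_tx_pools().
 */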
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

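/**
 * init_tx_pools - Allocate and initialize all tx and TSO pools
 * @netdev: net device being set up
 *
 * Allocate one standard pool and one TSO pool per active tx sub-CRQ:
 * the standard pools hold MTU-sized buffers (plus room for a VLAN
 * header), while the TSO pools hold a fixed number of larger buffers.
 */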
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) static int init_tx_pools(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) int tx_subcrqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) tx_subcrqs = adapter->num_active_tx_scrqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) adapter->tx_pool = kcalloc(tx_subcrqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (!adapter->tx_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) adapter->tso_pool = kcalloc(tx_subcrqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (!adapter->tso_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) kfree(adapter->tx_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) adapter->tx_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) adapter->num_active_tx_pools = tx_subcrqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) for (i = 0; i < tx_subcrqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) adapter->req_tx_entries_per_subcrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) adapter->req_mtu + VLAN_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) release_tx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) IBMVNIC_TSO_BUFS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) IBMVNIC_TSO_BUF_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) release_tx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (adapter->napi_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) for (i = 0; i < adapter->req_rx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) napi_enable(&adapter->napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) adapter->napi_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
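/* Disable NAPI polling on every rx queue; no-op if already disabled. */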
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (!adapter->napi_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) for (i = 0; i < adapter->req_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) napi_disable(&adapter->napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) adapter->napi_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
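/* Allocate and register one NAPI context per requested rx queue. */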
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) static int init_napi(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) adapter->napi = kcalloc(adapter->req_rx_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) sizeof(struct napi_struct), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (!adapter->napi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) for (i = 0; i < adapter->req_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) netif_napi_add(adapter->netdev, &adapter->napi[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ibmvnic_poll, NAPI_POLL_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) adapter->num_active_rx_napi = adapter->req_rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
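/* Unregister and free all NAPI contexts tracked by the adapter. */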
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static void release_napi(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!adapter->napi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) for (i = 0; i < adapter->num_active_rx_napi; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) netif_napi_del(&adapter->napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) kfree(adapter->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) adapter->napi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) adapter->num_active_rx_napi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) adapter->napi_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
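/* Log in to the VNIC server. A login that times out, is aborted by
 * firmware, or returns partial success is retried (renegotiating the
 * sub-CRQs in the partial success case) up to a fixed retry limit.
 */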
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static int ibmvnic_login(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned long timeout = msecs_to_jiffies(20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) int retry_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) int retries = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bool retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (retry_count > retries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) netdev_warn(netdev, "Login attempts exceeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) adapter->init_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) reinit_completion(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) rc = send_login(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (!wait_for_completion_timeout(&adapter->init_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) netdev_warn(netdev, "Login timed out, retrying...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) adapter->init_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) retry_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (adapter->init_done_rc == ABORTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) netdev_warn(netdev, "Login aborted, retrying...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) adapter->init_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying the login
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) } else if (adapter->init_done_rc == PARTIALSUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) retry_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) release_sub_crqs(adapter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) netdev_dbg(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) "Received partial success, retrying...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) adapter->init_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) reinit_completion(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) send_query_cap(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (!wait_for_completion_timeout(&adapter->init_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) netdev_warn(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) "Capabilities query timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) rc = init_sub_crqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) netdev_warn(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) "SCRQ initialization failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) rc = init_sub_crq_irqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) netdev_warn(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) "SCRQ irq initialization failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) } else if (adapter->init_done_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) netdev_warn(netdev, "Adapter login failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) } while (retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) __ibmvnic_set_mac(netdev, adapter->mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static void release_login_buffer(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) kfree(adapter->login_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) adapter->login_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) kfree(adapter->login_rsp_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) adapter->login_rsp_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
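/* Release everything set up by init_resources() and the login
 * exchange: VPD data, tx/rx pools, NAPI contexts and login buffers.
 */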
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static void release_resources(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) release_vpd_data(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) release_tx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) release_rx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) release_napi(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) release_login_buffer(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) release_login_rsp_buffer(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
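/* Ask the VNIC server to change the logical link state and wait for
 * its response, resending after a delay on partial success.
 */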
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) unsigned long timeout = msecs_to_jiffies(20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) bool resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) netdev_dbg(netdev, "setting link state %d\n", link_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) crq.logical_link_state.link_state = link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) resend = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) reinit_completion(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) rc = ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) netdev_err(netdev, "Failed to set link state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!wait_for_completion_timeout(&adapter->init_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) netdev_err(netdev, "timeout setting link state\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and resend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) mdelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) resend = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) } else if (adapter->init_done_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) netdev_warn(netdev, "Unable to set link state, rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) adapter->init_done_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return adapter->init_done_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) } while (resend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
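/* Publish the negotiated tx/rx queue counts to the network stack. */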
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static int set_real_num_queues(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) adapter->req_tx_queues, adapter->req_rx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) netdev_err(netdev, "failed to set the number of tx queues\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) netdev_err(netdev, "failed to set the number of rx queues\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
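/* Fetch the adapter's Vital Product Data: query its size, size the
 * DMA-mapped buffer accordingly, then ask firmware to fill it.
 */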
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (adapter->vpd->buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) len = adapter->vpd->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) mutex_lock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) adapter->fw_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) reinit_completion(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) crq.get_vpd_size.cmd = GET_VPD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) rc = ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!adapter->vpd->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (!adapter->vpd->buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) else if (adapter->vpd->len != len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) adapter->vpd->buff =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) krealloc(adapter->vpd->buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) adapter->vpd->len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) adapter->vpd->dma_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dev_err(dev, "Could not map VPD buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) kfree(adapter->vpd->buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) adapter->vpd->buff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) mutex_lock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) adapter->fw_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) reinit_completion(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) crq.get_vpd.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) crq.get_vpd.cmd = GET_VPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) rc = ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) kfree(adapter->vpd->buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) adapter->vpd->buff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) kfree(adapter->vpd->buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) adapter->vpd->buff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
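/* Allocate the resources needed to bring the interface up: queue
 * counts, VPD, NAPI contexts and the rx/tx buffer pools.
 */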
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) static int init_resources(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) rc = set_real_num_queues(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!adapter->vpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Vital Product Data (VPD) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) rc = ibmvnic_get_vpd(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) adapter->map_id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) rc = init_napi(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) send_query_map(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) rc = init_rx_pools(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) rc = init_tx_pools(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
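/* Core open path: replenish the rx pools, enable NAPI and the
 * sub-CRQ interrupts, bring the logical link up and start the tx
 * queues.
 */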
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static int __ibmvnic_open(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) enum vnic_state prev_state = adapter->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) adapter->state = VNIC_OPENING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) replenish_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ibmvnic_napi_enable(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* We're ready to receive frames, enable the sub-crq interrupts and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * set the logical link state to up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) for (i = 0; i < adapter->req_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (prev_state == VNIC_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) enable_irq(adapter->rx_scrq[i]->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) enable_scrq_irq(adapter, adapter->rx_scrq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) for (i = 0; i < adapter->req_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (prev_state == VNIC_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) enable_irq(adapter->tx_scrq[i]->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) enable_scrq_irq(adapter, adapter->tx_scrq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) ibmvnic_napi_disable(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) netif_tx_start_all_queues(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (prev_state == VNIC_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) for (i = 0; i < adapter->req_rx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) napi_schedule(&adapter->napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) adapter->state = VNIC_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static int ibmvnic_open(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* If device failover is pending, just set device state and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * Device operation will be handled by reset routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (adapter->failover_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) adapter->state = VNIC_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (adapter->state != VNIC_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) rc = ibmvnic_login(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) rc = init_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) netdev_err(netdev, "failed to initialize resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) rc = __ibmvnic_open(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) out:
	/* If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (rc && adapter->failover_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) adapter->state = VNIC_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
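/* Drop any skbs still held in the rx buffer pools. */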
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void clean_rx_pools(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct ibmvnic_rx_pool *rx_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct ibmvnic_rx_buff *rx_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) u64 rx_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) int rx_scrqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (!adapter->rx_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) rx_scrqs = adapter->num_active_rx_pools;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) rx_entries = adapter->req_rx_add_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* Free any remaining skbs in the rx buffer pools */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) for (i = 0; i < rx_scrqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) rx_pool = &adapter->rx_pool[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (!rx_pool || !rx_pool->rx_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) for (j = 0; j < rx_entries; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rx_buff = &rx_pool->rx_buff[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (rx_buff && rx_buff->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dev_kfree_skb_any(rx_buff->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) rx_buff->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
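/* Drop any skbs still held in a single tx buffer pool. */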
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct ibmvnic_tx_pool *tx_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct ibmvnic_tx_buff *tx_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) u64 tx_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (!tx_pool || !tx_pool->tx_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) tx_entries = tx_pool->num_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) for (i = 0; i < tx_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) tx_buff = &tx_pool->tx_buff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (tx_buff && tx_buff->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) dev_kfree_skb_any(tx_buff->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) tx_buff->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static void clean_tx_pools(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) int tx_scrqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (!adapter->tx_pool || !adapter->tso_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) tx_scrqs = adapter->num_active_tx_pools;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /* Free any remaining skbs in the tx buffer pools */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) for (i = 0; i < tx_scrqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
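/* Mask and disable the interrupt of every tx and rx sub-CRQ. */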
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (adapter->tx_scrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) for (i = 0; i < adapter->req_tx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (adapter->tx_scrq[i]->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) netdev_dbg(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) "Disabling tx_scrq[%d] irq\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) disable_scrq_irq(adapter, adapter->tx_scrq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) disable_irq(adapter->tx_scrq[i]->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (adapter->rx_scrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) for (i = 0; i < adapter->req_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (adapter->rx_scrq[i]->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) netdev_dbg(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) "Disabling rx_scrq[%d] irq\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) disable_scrq_irq(adapter, adapter->rx_scrq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) disable_irq(adapter->rx_scrq[i]->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
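/* Quiesce the device: stop the tx queues, disable NAPI and the
 * sub-CRQ interrupts, and free any skbs left in the rx/tx pools.
 */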
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static void ibmvnic_cleanup(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /* ensure that transmissions are stopped if called by do_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (test_bit(0, &adapter->resetting))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) netif_tx_disable(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) netif_tx_stop_all_queues(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ibmvnic_napi_disable(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) ibmvnic_disable_irqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) clean_rx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) clean_tx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static int __ibmvnic_close(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) adapter->state = VNIC_CLOSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) adapter->state = VNIC_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static int ibmvnic_close(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) adapter->state, adapter->failover_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) adapter->force_reset_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* If device failover is pending, just set device state and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * Device operation will be handled by reset routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (adapter->failover_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) adapter->state = VNIC_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) rc = __ibmvnic_close(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ibmvnic_cleanup(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to be filled
 * @hdr_data: buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 *
 * Return: the total length of the headers copied into hdr_data.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) int *hdr_len, u8 *hdr_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u8 *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) hdr_len[0] = sizeof(struct vlan_ethhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) hdr_len[0] = sizeof(struct ethhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (skb->protocol == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) hdr_len[1] = ip_hdr(skb)->ihl * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (ip_hdr(skb)->protocol == IPPROTO_TCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) hdr_len[2] = tcp_hdrlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) hdr_len[2] = sizeof(struct udphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) } else if (skb->protocol == htons(ETH_P_IPV6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) hdr_len[1] = sizeof(struct ipv6hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) hdr_len[2] = tcp_hdrlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) hdr_len[2] = sizeof(struct udphdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) } else if (skb->protocol == htons(ETH_P_ARP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) hdr_len[1] = arp_hdr_len(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) hdr_len[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) memset(hdr_data, 0, 120);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if ((hdr_field >> 6) & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) hdr = skb_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) memcpy(hdr_data, hdr, hdr_len[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) len += hdr_len[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if ((hdr_field >> 5) & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) hdr = skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) memcpy(hdr_data + len, hdr, hdr_len[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) len += hdr_len[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if ((hdr_field >> 4) & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) hdr = skb_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) memcpy(hdr_data + len, hdr, hdr_len[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) len += hdr_len[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr.
 *
 * Return: the number of descriptors placed in scrq_arr.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) union sub_crq *scrq_arr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) union sub_crq hdr_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) int tmp_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) int num_descs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) u8 *data, *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) while (tmp_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) cur = hdr_data + len - tmp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) memset(&hdr_desc, 0, sizeof(hdr_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (cur != hdr_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) data = hdr_desc.hdr_ext.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) tmp = tmp_len > 29 ? 29 : tmp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) hdr_desc.hdr_ext.len = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) data = hdr_desc.hdr.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) tmp = tmp_len > 24 ? 24 : tmp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) hdr_desc.hdr.len = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) hdr_desc.hdr.l2_len = (u8)hdr_len[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) hdr_desc.hdr.l4_len = (u8)hdr_len[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) hdr_desc.hdr.flag = hdr_field << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) memcpy(data, cur, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) tmp_len -= tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) *scrq_arr = hdr_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) scrq_arr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) num_descs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return num_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer containing the socket buffer and header data
 * @num_entries: number of descriptors to be sent, incremented by the
 *		 number of header descriptors created
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) int *num_entries, u8 hdr_field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) int hdr_len[3] = {0, 0, 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) int tot_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) u8 *hdr_data = txbuff->hdr_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) txbuff->hdr_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) txbuff->indir_arr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /* For some backing devices, mishandling of small packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * can result in a loss of connection or TX stall. Device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * architects recommend that no packet should be smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * than the minimum MTU value provided to the driver, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * pad any packets to that length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (skb->len < netdev->min_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return skb_put_padto(skb, netdev->min_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
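/* Main transmit routine. The skb's head and frags are copied into a
 * free slot of the pool's long term mapped buffer, and a TX
 * descriptor referencing that slot is posted to the tx sub-CRQ.
 */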
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int queue_num = skb_get_queue_mapping(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct ibmvnic_tx_buff *tx_buff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct ibmvnic_sub_crq_queue *tx_scrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct ibmvnic_tx_pool *tx_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) unsigned int tx_send_failed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) unsigned int tx_map_failed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) unsigned int tx_dropped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) unsigned int tx_packets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) unsigned int tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) dma_addr_t data_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) unsigned long lpar_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) union sub_crq tx_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) int num_entries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) unsigned char *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) u8 proto = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) u64 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) netdev_tx_t ret = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (test_bit(0, &adapter->resetting)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) tx_send_failed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) ret = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (ibmvnic_xmit_workarounds(skb, netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) tx_send_failed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) ret = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (skb_is_gso(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) tx_pool = &adapter->tso_pool[queue_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) tx_pool = &adapter->tx_pool[queue_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) tx_scrq = adapter->tx_scrq[queue_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) handle = tx_scrq->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) index = tx_pool->free_map[tx_pool->consumer_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (index == IBMVNIC_INVALID_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) tx_send_failed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ret = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) offset = index * tx_pool->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) dst = tx_pool->long_term_buff.buff + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) memset(dst, 0, tx_pool->buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) data_dma_addr = tx_pool->long_term_buff.addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (skb_shinfo(skb)->nr_frags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) int cur, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) /* Copy the head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) cur = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /* Copy the frags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) memcpy(dst + cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) page_address(skb_frag_page(frag)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) skb_frag_off(frag), skb_frag_size(frag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) cur += skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) skb_copy_from_linear_data(skb, dst, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	/* post changes to long_term_buff *dst before the VIOS accesses it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) tx_pool->consumer_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) tx_buff = &tx_pool->tx_buff[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) tx_buff->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) tx_buff->data_dma[0] = data_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) tx_buff->data_len[0] = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) tx_buff->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) tx_buff->pool_index = queue_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) tx_buff->last_frag = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
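^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	/* Build a version-1 TX descriptor: one CRQ element with one SGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	 * covering the copied frame, and a completion requested so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	 * buffer slot can be reclaimed once the VIOS is done with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	 */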
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) memset(&tx_crq, 0, sizeof(tx_crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) tx_crq.v1.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) tx_crq.v1.type = IBMVNIC_TX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) tx_crq.v1.n_crq_elem = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) tx_crq.v1.n_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (skb_is_gso(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) tx_crq.v1.correlator =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) tx_crq.v1.correlator = cpu_to_be32(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) tx_crq.v1.sge_len = cpu_to_be32(skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (skb->protocol == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) proto = ip_hdr(skb)->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) } else if (skb->protocol == htons(ETH_P_IPV6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) proto = ipv6_hdr(skb)->nexthdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (proto == IPPROTO_TCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) else if (proto == IPPROTO_UDP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) hdrs += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (skb_is_gso(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) hdrs += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	/* Determine if L2/L3/L4 headers are to be sent to firmware; when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	 * the high bit of the required-headers byte is set, header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	 * descriptors are built and posted via the indirect sub-CRQ path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if ((*hdrs >> 7) & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) tx_crq.v1.n_crq_elem = num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) tx_buff->num_entries = num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) tx_buff->indir_arr[0] = tx_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) sizeof(tx_buff->indir_arr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (dma_mapping_error(dev, tx_buff->indir_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) tx_buff->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (!firmware_has_feature(FW_FEATURE_CMO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) dev_err(dev, "tx: unable to map descriptor array\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) tx_map_failed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) goto tx_err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) lpar_rc = send_subcrq_indirect(adapter, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) (u64)tx_buff->indir_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) (u64)num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) dma_unmap_single(dev, tx_buff->indir_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) tx_buff->num_entries = num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		lpar_rc = send_subcrq(adapter, handle, &tx_crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (lpar_rc != H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) dev_err_ratelimited(dev, "tx: send failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) tx_buff->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (lpar_rc == H_CLOSED || adapter->failover_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /* Disable TX and report carrier off if queue is closed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * or pending failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) * Firmware guarantees that a signal will be sent to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * driver, triggering a reset or some other action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) netif_tx_stop_all_queues(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) tx_send_failed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ret = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) goto tx_err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
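^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	/* Throttle the queue once the number of in-flight descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	 * reaches the limit negotiated for this sub-CRQ; the completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	 * handler wakes the queue again as entries are retired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	 */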
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	if (atomic_add_return(num_entries, &tx_scrq->used) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	    adapter->req_tx_entries_per_subcrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) netif_stop_subqueue(netdev, queue_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) txq->trans_start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) ret = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) tx_err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	/* roll back consumer index and map array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (tx_pool->consumer_index == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		tx_pool->consumer_index = tx_pool->num_buffers - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		tx_pool->consumer_index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) tx_pool->free_map[tx_pool->consumer_index] = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) netdev->stats.tx_dropped += tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) netdev->stats.tx_bytes += tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) netdev->stats.tx_packets += tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) adapter->tx_send_failed += tx_send_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) adapter->tx_map_failed += tx_map_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) adapter->tx_stats_buffers[queue_num].packets += tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
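^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /* Synchronize the device's multicast filter with the netdev state by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  * issuing MULTICAST_CTRL CRQs: enable-all, disable-all, or one enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  * per multicast address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  */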
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static void ibmvnic_set_multi(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) crq.request_capability.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) crq.request_capability.cmd = REQUEST_CAPABILITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (netdev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (!adapter->promisc_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (netdev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /* Accept all multicast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) crq.multicast_ctrl.cmd = MULTICAST_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) } else if (netdev_mc_empty(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* Reject all multicast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) crq.multicast_ctrl.cmd = MULTICAST_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /* Accept one or more multicast(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) netdev_for_each_mc_addr(ha, netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) crq.multicast_ctrl.cmd = MULTICAST_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
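^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /* Ask the VNIC server to change the MAC address via a CHANGE_MAC_ADDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  * CRQ and wait, with a timeout, for the firmware response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  * netdev->dev_addr itself is updated by the response handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  */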
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (!is_valid_ether_addr(dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) rc = -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) mutex_lock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) adapter->fw_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) reinit_completion(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) rc = ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /* netdev->dev_addr is changed in handle_change_mac_rsp function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (rc || adapter->fw_done_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static int ibmvnic_set_mac(struct net_device *netdev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) ether_addr_copy(adapter->mac_addr, addr->sa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (adapter->state != VNIC_PROBED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) rc = __ibmvnic_set_mac(netdev, addr->sa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * do_change_param_reset returns zero if we are able to keep processing reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * events, or non-zero if we hit a fatal error and must halt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) static int do_change_param_reset(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) struct ibmvnic_rwi *rwi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) u32 reset_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) rwi->reset_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) adapter->reset_reason = rwi->reset_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) ibmvnic_cleanup(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (reset_state == VNIC_OPEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) rc = __ibmvnic_close(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) release_sub_crqs(adapter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) adapter->state = VNIC_PROBED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	rc = init_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) netdev_err(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) "Couldn't initialize crq. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) rc = ibmvnic_reset_init(adapter, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) rc = IBMVNIC_INIT_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* If the adapter was in PROBE state prior to the reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * exit here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (reset_state == VNIC_PROBED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	rc = ibmvnic_login(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) rc = init_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) ibmvnic_disable_irqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) adapter->state = VNIC_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (reset_state == VNIC_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) rc = __ibmvnic_open(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) rc = IBMVNIC_OPEN_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) /* refresh device's multicast list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) ibmvnic_set_multi(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) /* kick napi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) for (i = 0; i < adapter->req_rx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) napi_schedule(&adapter->napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) adapter->state = reset_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * do_reset returns zero if we are able to keep processing reset events, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * non-zero if we hit a fatal error and must halt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) static int do_reset(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) struct ibmvnic_rwi *rwi, u32 reset_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) u64 old_num_rx_queues, old_num_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) u64 old_num_rx_slots, old_num_tx_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) netdev_dbg(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) adapter->state, adapter->failover_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) rwi->reset_reason, reset_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * Now that we have the rtnl lock, clear any pending failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * This will ensure ibmvnic_open() has either completed or will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * block until failover is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (rwi->reset_reason == VNIC_RESET_FAILOVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) adapter->failover_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) adapter->reset_reason = rwi->reset_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) old_num_rx_queues = adapter->req_rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) old_num_tx_queues = adapter->req_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) ibmvnic_cleanup(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (reset_state == VNIC_OPEN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) adapter->reset_reason != VNIC_RESET_MOBILITY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) adapter->reset_reason != VNIC_RESET_FAILOVER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) adapter->state = VNIC_CLOSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /* Release the RTNL lock before link state change and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * re-acquire after the link state change to allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * linkwatch_event to grab the RTNL lock and run during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * a reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (adapter->state != VNIC_CLOSING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) adapter->state = VNIC_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /* remove the closed state so when we call open it appears
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * we are coming from the probed state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) adapter->state = VNIC_PROBED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
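^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		/* After a mobility event the CRQ is re-enabled in place;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		 * other reset reasons fully reset (free and re-register)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		 * the CRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		 */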
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) rc = ibmvnic_reenable_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) release_sub_crqs(adapter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) rc = ibmvnic_reset_crq(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (rc == H_CLOSED || rc == H_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) rc = vio_enable_interrupts(adapter->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) netdev_err(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) "Reset failed to enable interrupts. rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) netdev_err(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) "Reset couldn't initialize crq. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) rc = ibmvnic_reset_init(adapter, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) rc = IBMVNIC_INIT_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /* If the adapter was in PROBE state prior to the reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) * exit here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (reset_state == VNIC_PROBED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		rc = ibmvnic_login(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		if (adapter->req_rx_queues != old_num_rx_queues ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		    adapter->req_tx_queues != old_num_tx_queues ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		    adapter->req_rx_add_entries_per_subcrq != old_num_rx_slots ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		    adapter->req_tx_entries_per_subcrq != old_num_tx_slots ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		    !adapter->rx_pool || !adapter->tso_pool ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		    !adapter->tx_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) release_rx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) release_tx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) release_napi(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) release_vpd_data(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) rc = init_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) rc = reset_tx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) rc = reset_rx_pools(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) ibmvnic_disable_irqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) adapter->state = VNIC_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (reset_state == VNIC_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) rc = __ibmvnic_open(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) rc = IBMVNIC_OPEN_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /* refresh device's multicast list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) ibmvnic_set_multi(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) adapter->reset_reason == VNIC_RESET_MOBILITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /* restore the adapter state if reset failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) adapter->state = reset_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) adapter->state, adapter->failover_pending, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
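^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)  * do_hard_reset releases and rebuilds the CRQ, sub-CRQs and all resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)  * from scratch; like do_reset, it returns zero if reset processing can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)  * continue and non-zero on a fatal error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)  */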
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) static int do_hard_reset(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) struct ibmvnic_rwi *rwi, u32 reset_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) rwi->reset_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) adapter->reset_reason = rwi->reset_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) ibmvnic_cleanup(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) release_sub_crqs(adapter, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) /* remove the closed state so when we call open it appears
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * we are coming from the probed state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) adapter->state = VNIC_PROBED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) reinit_completion(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) rc = init_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) netdev_err(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) "Couldn't initialize crq. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) rc = ibmvnic_reset_init(adapter, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) /* If the adapter was in PROBE state prior to the reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) * exit here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (reset_state == VNIC_PROBED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) rc = ibmvnic_login(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) rc = init_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) ibmvnic_disable_irqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) adapter->state = VNIC_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (reset_state == VNIC_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) rc = __ibmvnic_open(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) rc = IBMVNIC_OPEN_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) /* restore adapter state if reset failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) adapter->state = reset_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) adapter->state, adapter->failover_pending, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
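^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /* Dequeue the next reset work item, if any, under the rwi_lock. */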
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) struct ibmvnic_rwi *rwi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) spin_lock_irqsave(&adapter->rwi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (!list_empty(&adapter->rwi_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) list_del(&rwi->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) rwi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) spin_unlock_irqrestore(&adapter->rwi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return rwi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
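^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) /* Reset worker: drains the rwi list, dispatching each work item to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  * appropriate reset routine. If a reset pass is already running, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  * work is re-queued as delayed work instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  */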
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) static void __ibmvnic_reset(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) struct ibmvnic_rwi *rwi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) struct ibmvnic_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) bool saved_state = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) u32 reset_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (test_and_set_bit_lock(0, &adapter->resetting)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) IBMVNIC_RESET_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) rwi = get_next_rwi(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) while (rwi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) spin_lock_irqsave(&adapter->state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (adapter->state == VNIC_REMOVING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) adapter->state == VNIC_REMOVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) spin_unlock_irqrestore(&adapter->state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) kfree(rwi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) rc = EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (!saved_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) reset_state = adapter->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) saved_state = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) spin_unlock_irqrestore(&adapter->state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) /* CHANGE_PARAM requestor holds rtnl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) rc = do_change_param_reset(adapter, rwi, reset_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) } else if (adapter->force_reset_recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * Since we are doing a hard reset now, clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * failover_pending flag so we don't ignore any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * future MOBILITY or other resets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) adapter->failover_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) /* Transport event occurred during previous reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (adapter->wait_for_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 				/* Previous reset was CHANGE_PARAM; its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 				 * requestor still holds rtnl_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) adapter->force_reset_recovery = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) rc = do_hard_reset(adapter, rwi, reset_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) adapter->force_reset_recovery = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) rc = do_hard_reset(adapter, rwi, reset_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) /* give backing device time to settle down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) netdev_dbg(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) "[S:%d] Hard reset failed, waiting 60 secs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) adapter->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) schedule_timeout(60 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) rc = do_reset(adapter, rwi, reset_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) kfree(rwi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) adapter->last_reset_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) rwi = get_next_rwi(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) rwi->reset_reason == VNIC_RESET_MOBILITY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) adapter->force_reset_recovery = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (adapter->wait_for_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) adapter->reset_done_rc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) complete(&adapter->reset_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) clear_bit_unlock(0, &adapter->resetting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) netdev_dbg(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) "[S:%d FRR:%d WFR:%d] Done processing resets\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) adapter->state, adapter->force_reset_recovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) adapter->wait_for_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
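^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) /* Delayed-work wrapper that re-runs __ibmvnic_reset after a prior pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)  * found another reset already in progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)  */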
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static void __ibmvnic_delayed_reset(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) struct ibmvnic_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) adapter = container_of(work, struct ibmvnic_adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) ibmvnic_delayed_reset.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) __ibmvnic_reset(&adapter->ibmvnic_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
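^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) /* Queue a reset work item of the given reason for the reset worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)  * Duplicate pending resets of the same reason are dropped, and a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)  * transport event flushes anything already queued. Returns 0 or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)  * negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)  */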
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) enum ibmvnic_reset_reason reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) struct list_head *entry, *tmp_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) struct ibmvnic_rwi *rwi, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) spin_lock_irqsave(&adapter->rwi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	 * If failover is pending don't schedule any other reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	 * Instead let the failover complete. If there is already a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	 * failover reset scheduled, we will detect and drop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	 * duplicate reset when walking the ->rwi_list below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (adapter->state == VNIC_REMOVING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) adapter->state == VNIC_REMOVED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) ret = EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (adapter->state == VNIC_PROBING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) netdev_warn(netdev, "Adapter reset during probe\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) ret = adapter->init_done_rc = EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) list_for_each(entry, &adapter->rwi_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) tmp = list_entry(entry, struct ibmvnic_rwi, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) if (tmp->reset_reason == reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) ret = EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (!rwi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) ret = ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) /* if we just received a transport event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) * flush reset queue and process this reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) list_del(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) kfree(list_entry(entry, struct ibmvnic_rwi, list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) rwi->reset_reason = reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) list_add_tail(&rwi->list, &adapter->rwi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) schedule_work(&adapter->ibmvnic_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) /* ibmvnic_close() below can block, so drop the lock first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) spin_unlock_irqrestore(&adapter->rwi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (ret == ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) ibmvnic_close(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) return -ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
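^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) /* .ndo_tx_timeout handler: schedule a TIMEOUT reset unless a reset is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)  * already running or the last reset finished too recently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)  */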
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct ibmvnic_adapter *adapter = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (test_bit(0, &adapter->resetting)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) netdev_err(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) "Adapter is resetting, skip timeout reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	/* Do not queue up a reset until at least 5 seconds (the default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	 * watchdog value) have passed since the last reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		netdev_dbg(dev, "Not yet time to handle tx timeout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
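^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) /* Return an rx buffer's slot to its pool's free map so the buffer can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)  * be replenished to the device later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)  */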
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

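/* NAPI poll callback for one rx sub-CRQ. Up to @budget completions are
 * drained; each frame's payload is copied out of the long term mapped
 * buffer into its skb before the buffer is returned to the pool. If the
 * budget was not exhausted, the irq is re-enabled and polling stops,
 * unless another completion raced in, in which case polling restarts.
 */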
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (unlikely(test_bit(0, &adapter->resetting) &&
			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			napi_complete_done(napi, frames_processed);
			return frames_processed;
		}

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		/* The queue entry at the current index is peeked at above
		 * to determine that there is a valid descriptor awaiting
		 * processing. We want to be sure that the current slot
		 * holds a valid descriptor before reading its contents.
		 */
		dma_rmb();
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff = (struct ibmvnic_rx_buff *)
			  be64_to_cpu(next->rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_dbg(netdev, "rx buffer returned with rc %x\n",
				   be16_to_cpu(next->rx_comp.rc));
			/* free the entry */
			next->rx_comp.first = 0;
			dev_kfree_skb_any(rx_buff->skb);
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		} else if (!rx_buff->skb) {
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		/* load long_term_buff before copying to skb */
		dma_rmb();
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* The VLAN header has been stripped by the system firmware
		 * and needs to be reinserted by the driver.
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		adapter->rx_stats_buffers[scrq_num].packets++;
		adapter->rx_stats_buffers[scrq_num].bytes += length;
		frames_processed++;
	}

	if (adapter->state != VNIC_CLOSING)
		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

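/* Issue a CHANGE_PARAM reset to apply the values in adapter->desired and
 * wait up to 60 seconds for it to complete. On failure, the previous
 * settings saved in adapter->fallback are restored with a second
 * CHANGE_PARAM reset.
 */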
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
	int rc, ret;

	adapter->fallback.mtu = adapter->req_mtu;
	adapter->fallback.rx_queues = adapter->req_rx_queues;
	adapter->fallback.tx_queues = adapter->req_tx_queues;
	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;

	reinit_completion(&adapter->reset_done);
	adapter->wait_for_reset = true;
	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);

	if (rc) {
		ret = rc;
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
	if (rc) {
		ret = -ENODEV;
		goto out;
	}

	ret = 0;
	if (adapter->reset_done_rc) {
		ret = -EIO;
		adapter->desired.mtu = adapter->fallback.mtu;
		adapter->desired.rx_queues = adapter->fallback.rx_queues;
		adapter->desired.tx_queues = adapter->fallback.tx_queues;
		adapter->desired.rx_entries = adapter->fallback.rx_entries;
		adapter->desired.tx_entries = adapter->fallback.tx_entries;

		reinit_completion(&adapter->reset_done);
		adapter->wait_for_reset = true;
		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
		if (rc) {
			ret = rc;
			goto out;
		}
		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
		if (rc) {
			ret = -ENODEV;
			goto out;
		}
	}
out:
	adapter->wait_for_reset = false;

	return ret;
}

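/* The MTU requested through the net_device is the payload size only;
 * the desired value handed to the VNIC server includes the Ethernet
 * header, hence the ETH_HLEN adjustment below.
 */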
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.mtu = new_mtu + ETH_HLEN;

	return wait_for_reset(adapter);
}

static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Some backing hardware adapters cannot
	 * handle packets with an MSS less than 224
	 * or with only one segment.
	 */
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_size < 224 ||
		    skb_shinfo(skb)->gso_segs == 1)
			features &= ~NETIF_F_GSO_MASK;
	}

	return features;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
	.ndo_change_mtu		= ibmvnic_change_mtu,
	.ndo_features_check	= ibmvnic_features_check,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = send_query_phys_parms(adapter);
	if (rc) {
		adapter->speed = SPEED_UNKNOWN;
		adapter->duplex = DUPLEX_UNKNOWN;
	}
	cmd->base.speed = adapter->speed;
	cmd->base.duplex = adapter->duplex;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

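/* Ring sizes reported to ethtool depend on the "use server maxes"
 * private flag: when set, the maximums advertised by the VNIC server
 * are reported; otherwise the driver-wide IBMVNIC_MAX_QUEUE_SZ limit is
 * used. Mini and jumbo rings are not supported.
 */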
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	} else {
		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
	}
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
		netdev_info(netdev,
			    "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    ring->rx_pending, ring->tx_pending,
			    adapter->req_rx_add_entries_per_subcrq,
			    adapter->req_tx_entries_per_subcrq);
	return ret;
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
		channels->max_rx = adapter->max_rx_queues;
		channels->max_tx = adapter->max_tx_queues;
	} else {
		channels->max_rx = IBMVNIC_MAX_QUEUES;
		channels->max_tx = IBMVNIC_MAX_QUEUES;
	}

	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int ret;

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	ret = wait_for_reset(adapter);

	if (!ret &&
	    (adapter->req_rx_queues != channels->rx_count ||
	     adapter->req_tx_queues != channels->tx_count))
		netdev_info(netdev,
			    "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
			    channels->rx_count, channels->tx_count,
			    adapter->req_rx_queues, adapter->req_tx_queues);
	return ret;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
		     i++, data += ETH_GSTRING_LEN)
			memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);

		for (i = 0; i < adapter->req_tx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN,
				 "tx%d_dropped_packets", i);
			data += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->req_rx_queues; i++) {
			snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
			data += ETH_GSTRING_LEN;

			snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
			data += ETH_GSTRING_LEN;
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       ibmvnic_priv_flags[i]);
		break;
	default:
		return;
	}
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats) +
		       adapter->req_tx_queues * NUM_TX_STATS +
		       adapter->req_rx_queues * NUM_RX_STATS;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(ibmvnic_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i, j;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
		cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	reinit_completion(&adapter->stats_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return;
	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
	if (rc)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
						       ibmvnic_stats[i].offset));

	for (j = 0; j < adapter->req_tx_queues; j++) {
		data[i] = adapter->tx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->tx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->tx_stats_buffers[j].dropped_packets;
		i++;
	}

	for (j = 0; j < adapter->req_rx_queues; j++) {
		data[i] = adapter->rx_stats_buffers[j].packets;
		i++;
		data[i] = adapter->rx_stats_buffers[j].bytes;
		i++;
		data[i] = adapter->rx_stats_buffers[j].interrupts;
		i++;
	}
}

static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->priv_flags;
}

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);

	if (which_maxes)
		adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
	else
		adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

	return 0;
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.set_ringparam		= ibmvnic_set_ringparam,
	.get_channels		= ibmvnic_get_channels,
	.set_channels		= ibmvnic_set_channels,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
	.get_priv_flags		= ibmvnic_get_priv_flags,
	.set_priv_flags		= ibmvnic_set_priv_flags,
};

/* Routines for managing CRQs/sCRQs */

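/* Re-register a single sub-CRQ with the hypervisor after a reset. Any
 * bound irq is torn down first, the 4-page message ring is zeroed, and
 * the queue is registered again via h_reg_sub_crq() using its existing
 * DMA mapping.
 */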
static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (!scrq) {
		netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
		return -EINVAL;
	}

	if (scrq->irq) {
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}
	if (scrq->msgs) {
		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
		atomic_set(&scrq->used, 0);
		scrq->cur = 0;
	} else {
		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
		return -EINVAL;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}

static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
	int i, rc = 0;	/* initialized in case a queue count is zero */

	if (!adapter->tx_scrq || !adapter->rx_scrq)
		return -EINVAL;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		if (rc)
			return rc;
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		if (rc)
			return rc;
	}

	return rc;
}

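/* Free one sub-CRQ. When do_h_free is set, the queue is first
 * deregistered from the hypervisor with H_FREE_SUB_CRQ, retrying while
 * the hypervisor reports busy; the DMA mapping and the 4-page message
 * ring are then released unconditionally.
 */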
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq,
				  bool do_h_free)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	if (do_h_free) {
		/* Close the sub-crqs */
		do {
			rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
						adapter->vdev->unit_address,
						scrq->crq_num);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		if (rc) {
			netdev_err(adapter->netdev,
				   "Failed to release sub-CRQ %16lx, rc = %ld\n",
				   scrq->crq_num, rc);
		}
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

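/* Allocate and register one sub-CRQ. The message ring is four
 * contiguous zeroed pages (order 2), DMA mapped and registered with the
 * hypervisor. H_RESOURCE is handled by resetting the main CRQ;
 * H_CLOSED only means the partner adapter is not ready yet, so the
 * queue is still returned in that case.
 */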
static struct ibmvnic_sub_crq_queue *
init_sub_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

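/* Release all tx and rx sub-CRQs. Any irqs still bound are freed and
 * their mappings disposed before each queue is released; the tx_scrq
 * and rx_scrq arrays themselves are then freed and the active counts
 * reset to zero.
 */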
static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
				   i);
			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
		adapter->num_active_tx_scrqs = 0;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
				   i);
			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i],
					      do_h_free);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
		adapter->num_active_rx_scrqs = 0;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

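/* Re-enable the interrupt source behind a sub-CRQ via H_VIOCTL. During
 * a mobility reset a stale interrupt may still be pending from the
 * source system, so an H_EOI (with hw_irq encoded into the XIRR format
 * that hcall expects) is issued first to clear it.
 */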
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	if (test_bit(0, &adapter->resetting) &&
	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
		u64 val = 0xff000000 | scrq->hw_irq;

		rc = plpar_hcall_norets(H_EOI, val);
		/* H_EOI fails with rc = H_FUNCTION when running
		 * in XIVE mode; that is expected and not an error.
		 */
		if (rc && (rc != H_FUNCTION))
			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
				val, rc);
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

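/* Reap tx completions from a sub-CRQ. Each completion descriptor may
 * carry several correlators; the IBMVNIC_TSO_POOL_MASK bit of a
 * correlator selects the TSO pool and the remaining bits index the tx
 * buffer, whose slot is returned to the pool's free_map. Once the
 * number of outstanding entries drops to half the ring, a stopped tx
 * subqueue is woken. As in the rx path, the irq is re-enabled at the
 * end and processing restarts if more completions raced in.
 */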
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) struct ibmvnic_sub_crq_queue *scrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) struct ibmvnic_tx_pool *tx_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) struct ibmvnic_tx_buff *txbuff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) union sub_crq *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) restart_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) while (pending_scrq(adapter, scrq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) unsigned int pool = scrq->pool_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) int num_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) /* The queue entry at the current index is peeked at above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) * to determine that there is a valid descriptor awaiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) * processing. We want to be sure that the current slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) * holds a valid descriptor before reading its contents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) dma_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) next = ibmvnic_next_scrq(adapter, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) for (i = 0; i < next->tx_comp.num_comps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (next->tx_comp.rcs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) dev_err(dev, "tx error %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) next->tx_comp.rcs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) index = be32_to_cpu(next->tx_comp.correlators[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (index & IBMVNIC_TSO_POOL_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) tx_pool = &adapter->tso_pool[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) index &= ~IBMVNIC_TSO_POOL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) tx_pool = &adapter->tx_pool[pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) txbuff = &tx_pool->tx_buff[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (!txbuff->data_dma[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) txbuff->data_dma[j] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (txbuff->last_frag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) dev_kfree_skb_any(txbuff->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) txbuff->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) num_entries += txbuff->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) tx_pool->free_map[tx_pool->producer_index] = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) tx_pool->producer_index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) (tx_pool->producer_index + 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) tx_pool->num_buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) /* remove tx_comp scrq*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) next->tx_comp.first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (atomic_sub_return(num_entries, &scrq->used) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) (adapter->req_tx_entries_per_subcrq / 2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) __netif_subqueue_stopped(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) scrq->pool_index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) netif_wake_subqueue(adapter->netdev, scrq->pool_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) netdev_dbg(adapter->netdev, "Started queue %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) scrq->pool_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) enable_scrq_irq(adapter, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) if (pending_scrq(adapter, scrq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) disable_scrq_irq(adapter, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) goto restart_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) struct ibmvnic_sub_crq_queue *scrq = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) struct ibmvnic_adapter *adapter = scrq->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) disable_scrq_irq(adapter, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) ibmvnic_complete_tx(adapter, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) struct ibmvnic_sub_crq_queue *scrq = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) struct ibmvnic_adapter *adapter = scrq->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) /* When booting a kdump kernel we can hit pending interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) * prior to completing driver initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) if (unlikely(adapter->state != VNIC_OPEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
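	/* napi_schedule_prep() returns false if this NAPI context is
	 * already scheduled (or disabled), so the queue's irq is only
	 * disabled by the path that actually wins the right to poll.
	 */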
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) disable_scrq_irq(adapter, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) __napi_schedule(&adapter->napi[scrq->scrq_num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) struct ibmvnic_sub_crq_queue *scrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) int i = 0, j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) for (i = 0; i < adapter->req_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) scrq = adapter->tx_scrq[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) if (!scrq->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) dev_err(dev, "Error mapping irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) goto req_tx_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) adapter->vdev->unit_address, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 0, scrq->name, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) scrq->irq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) irq_dispose_mapping(scrq->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) goto req_tx_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) for (i = 0; i < adapter->req_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) scrq = adapter->rx_scrq[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) if (!scrq->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) dev_err(dev, "Error mapping irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) goto req_rx_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) adapter->vdev->unit_address, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 0, scrq->name, scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) scrq->irq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) irq_dispose_mapping(scrq->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) goto req_rx_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) req_rx_irq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) irq_dispose_mapping(adapter->rx_scrq[j]->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) i = adapter->req_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) req_tx_irq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) irq_dispose_mapping(adapter->tx_scrq[j]->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) release_sub_crqs(adapter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) static int init_sub_crqs(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) struct ibmvnic_sub_crq_queue **allqueues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) int registered_queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) int total_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) int more = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (!allqueues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) for (i = 0; i < total_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) allqueues[i] = init_sub_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (!allqueues[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) dev_warn(dev, "Couldn't allocate all sub-crqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) registered_queues++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) /* Make sure we were able to register the minimum number of queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) if (registered_queues <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) adapter->min_tx_queues + adapter->min_rx_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) goto tx_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) /* Distribute the failed allocated queues*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) for (i = 0; i < total_queues - registered_queues + more ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) netdev_dbg(adapter->netdev, "Reducing number of queues\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) switch (i % 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) if (adapter->req_rx_queues > adapter->min_rx_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) adapter->req_rx_queues--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) more++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) if (adapter->req_tx_queues > adapter->min_tx_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) adapter->req_tx_queues--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) more++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) sizeof(*adapter->tx_scrq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) if (!adapter->tx_scrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) goto tx_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) for (i = 0; i < adapter->req_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) adapter->tx_scrq[i] = allqueues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) adapter->tx_scrq[i]->pool_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) adapter->num_active_tx_scrqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) sizeof(*adapter->rx_scrq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) if (!adapter->rx_scrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) goto rx_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) for (i = 0; i < adapter->req_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) adapter->rx_scrq[i]->scrq_num = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) adapter->num_active_rx_scrqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) kfree(allqueues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) rx_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) kfree(adapter->tx_scrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) adapter->tx_scrq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) tx_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) for (i = 0; i < registered_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) release_sub_crq_queue(adapter, allqueues[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) kfree(allqueues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) int max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) int cap_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) * the PROMISC flag). Initialize this count upfront. When the tasklet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) * receives a response to all of these, it will send the next protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) * message (QUERY_IP_OFFLOAD).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) if (!(adapter->netdev->flags & IFF_PROMISC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) adapter->promisc_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) cap_reqs = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) cap_reqs = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) if (!retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) /* Sub-CRQ entries are 32 byte long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) atomic_set(&adapter->running_cap_crqs, cap_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) if (adapter->min_tx_entries_per_subcrq > entries_page ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) adapter->min_rx_add_entries_per_subcrq > entries_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) dev_err(dev, "Fatal, invalid entries per sub-crq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) if (adapter->desired.mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) adapter->req_mtu = adapter->desired.mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) if (!adapter->desired.tx_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) adapter->desired.tx_entries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) adapter->max_tx_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) if (!adapter->desired.rx_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) adapter->desired.rx_entries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) adapter->max_rx_add_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) max_entries = IBMVNIC_MAX_LTB_SIZE /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) adapter->desired.tx_entries = max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) adapter->desired.rx_entries = max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) if (adapter->desired.tx_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) adapter->req_tx_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) adapter->desired.tx_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) adapter->req_tx_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) adapter->max_tx_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (adapter->desired.rx_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) adapter->req_rx_add_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) adapter->desired.rx_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) adapter->req_rx_add_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) adapter->max_rx_add_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (adapter->desired.tx_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) adapter->req_tx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) adapter->desired.tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) adapter->req_tx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) adapter->opt_tx_comp_sub_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) if (adapter->desired.rx_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) adapter->req_rx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) adapter->desired.rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) adapter->req_rx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) adapter->opt_rx_comp_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) adapter->req_rx_add_queues = adapter->max_rx_add_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) atomic_add(cap_reqs, &adapter->running_cap_crqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) crq.request_capability.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) crq.request_capability.cmd = REQUEST_CAPABILITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) crq.request_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) crq.request_capability.number =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) cpu_to_be64(adapter->req_tx_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) crq.request_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) crq.request_capability.number =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) crq.request_capability.capability = cpu_to_be16(REQ_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) if (adapter->netdev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) if (adapter->promisc_supported) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) crq.request_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) cpu_to_be16(PROMISC_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) crq.request_capability.number = cpu_to_be64(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) crq.request_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) cpu_to_be16(PROMISC_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) crq.request_capability.number = cpu_to_be64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) /* Keep at end to catch any discrepancy between expected and actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) * CRQs sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) WARN_ON(cap_reqs != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) static int pending_scrq(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) struct ibmvnic_sub_crq_queue *scrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) union sub_crq *entry = &scrq->msgs[scrq->cur];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) struct ibmvnic_sub_crq_queue *scrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) union sub_crq *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) spin_lock_irqsave(&scrq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) entry = &scrq->msgs[scrq->cur];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) if (++scrq->cur == scrq->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) scrq->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) spin_unlock_irqrestore(&scrq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) /* Ensure that the entire buffer descriptor has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) * loaded before reading its contents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) dma_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) struct ibmvnic_crq_queue *queue = &adapter->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) union ibmvnic_crq *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) crq = &queue->msgs[queue->cur];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (++queue->cur == queue->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) queue->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) crq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) return crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) static void print_subcrq_error(struct device *dev, int rc, const char *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) case H_PARAMETER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) dev_warn_ratelimited(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) func, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) case H_CLOSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) dev_warn_ratelimited(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) func, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) union sub_crq *sub_crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) unsigned int ua = adapter->vdev->unit_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) u64 *u64_crq = (u64 *)sub_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) netdev_dbg(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) (unsigned long int)cpu_to_be64(remote_handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) (unsigned long int)cpu_to_be64(u64_crq[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) (unsigned long int)cpu_to_be64(u64_crq[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) (unsigned long int)cpu_to_be64(u64_crq[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) (unsigned long int)cpu_to_be64(u64_crq[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) /* Make sure the hypervisor sees the complete request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) cpu_to_be64(remote_handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) cpu_to_be64(u64_crq[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) cpu_to_be64(u64_crq[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) cpu_to_be64(u64_crq[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) cpu_to_be64(u64_crq[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) print_subcrq_error(dev, rc, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) u64 remote_handle, u64 ioba, u64 num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) unsigned int ua = adapter->vdev->unit_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) /* Make sure the hypervisor sees the complete request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) cpu_to_be64(remote_handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) ioba, num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) print_subcrq_error(dev, rc, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) union ibmvnic_crq *crq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) unsigned int ua = adapter->vdev->unit_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) u64 *u64_crq = (u64 *)crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) (unsigned long int)cpu_to_be64(u64_crq[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) (unsigned long int)cpu_to_be64(u64_crq[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) if (!adapter->crq.active &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) /* Make sure the hypervisor sees the complete request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) rc = plpar_hcall_norets(H_SEND_CRQ, ua,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) cpu_to_be64(u64_crq[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) cpu_to_be64(u64_crq[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) if (rc == H_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) dev_warn(dev, "CRQ Queue closed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) /* do not reset, report the fail, wait for passive init from server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) dev_warn(dev, "Send error (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) int retries = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) crq.generic.cmd = IBMVNIC_CRQ_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) netdev_dbg(adapter->netdev, "Sending CRQ init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
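	/* Retry for up to 100 * 50 ms = 5 seconds while the send fails
	 * with H_CLOSED, i.e. while the server has not yet opened its
	 * end of the CRQ.
	 */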
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) rc = ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) if (rc != H_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) retries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) msleep(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) } while (retries > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) dev_err(dev, "Failed to send init request, rc = %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) static int send_version_xchg(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) crq.version_exchange.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) crq.version_exchange.cmd = VERSION_EXCHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) return ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) struct vnic_login_client_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) __be16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) char name[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) /* Calculate the amount of buffer space needed for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) * vnic client data in the login buffer. There are four entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) * OS name, LPAR name, device name, and a null last entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) len = 4 * sizeof(struct vnic_login_client_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) len += 6; /* "Linux" plus NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) len += strlen(utsname()->nodename) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) len += strlen(adapter->netdev->name) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) struct vnic_login_client_data *vlcd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) const char *os_name = "Linux";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) /* Type 1 - LPAR OS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) vlcd->type = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) len = strlen(os_name) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) vlcd->len = cpu_to_be16(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) strncpy(vlcd->name, os_name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) /* Type 2 - LPAR name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) vlcd->type = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) len = strlen(utsname()->nodename) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) vlcd->len = cpu_to_be16(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) strncpy(vlcd->name, utsname()->nodename, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) /* Type 3 - device name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) vlcd->type = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) len = strlen(adapter->netdev->name) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) vlcd->len = cpu_to_be16(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) strncpy(vlcd->name, adapter->netdev->name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) static int send_login(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) struct ibmvnic_login_buffer *login_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) struct vnic_login_client_data *vlcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) dma_addr_t rsp_buffer_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) dma_addr_t buffer_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) size_t rsp_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) int client_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) size_t buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) __be64 *tx_list_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) __be64 *rx_list_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) if (!adapter->tx_scrq || !adapter->rx_scrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) netdev_err(adapter->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) "RX or TX queues are not allocated, device login failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) release_login_buffer(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) release_login_rsp_buffer(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) client_data_len = vnic_client_data_len(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) buffer_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) sizeof(struct ibmvnic_login_buffer) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) client_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) if (!login_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) goto buf_alloc_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) buffer_token = dma_map_single(dev, login_buffer, buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) if (dma_mapping_error(dev, buffer_token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) dev_err(dev, "Couldn't map login buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) goto buf_map_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) sizeof(u64) * adapter->req_tx_queues +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) sizeof(u64) * adapter->req_rx_queues +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) sizeof(u64) * adapter->req_rx_queues +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) if (!login_rsp_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) goto buf_rsp_alloc_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) rsp_buffer_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) if (dma_mapping_error(dev, rsp_buffer_token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) dev_err(dev, "Couldn't map login rsp buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) goto buf_rsp_map_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) adapter->login_buf = login_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) adapter->login_buf_token = buffer_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) adapter->login_buf_sz = buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) adapter->login_rsp_buf = login_rsp_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) adapter->login_rsp_buf_token = rsp_buffer_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) adapter->login_rsp_buf_sz = rsp_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) login_buffer->len = cpu_to_be32(buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) login_buffer->off_txcomp_subcrqs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) login_buffer->off_rxcomp_subcrqs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) sizeof(u64) * adapter->req_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) tx_list_p = (__be64 *)((char *)login_buffer +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) sizeof(struct ibmvnic_login_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) rx_list_p = (__be64 *)((char *)login_buffer +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) sizeof(struct ibmvnic_login_buffer) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) sizeof(u64) * adapter->req_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) for (i = 0; i < adapter->req_tx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) if (adapter->tx_scrq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) crq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) for (i = 0; i < adapter->req_rx_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) if (adapter->rx_scrq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) crq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) /* Insert vNIC login client data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) vlcd = (struct vnic_login_client_data *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) login_buffer->client_data_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) cpu_to_be32((char *)vlcd - (char *)login_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) login_buffer->client_data_len = cpu_to_be32(client_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) vnic_add_client_data(adapter, vlcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) netdev_dbg(adapter->netdev, "Login Buffer:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) netdev_dbg(adapter->netdev, "%016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) ((unsigned long int *)(adapter->login_buf))[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) crq.login.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) crq.login.cmd = LOGIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) crq.login.ioba = cpu_to_be32(buffer_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) crq.login.len = cpu_to_be32(buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) adapter->login_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) rc = ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) adapter->login_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) goto buf_rsp_map_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) buf_rsp_map_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) kfree(login_rsp_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) adapter->login_rsp_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) buf_rsp_alloc_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) buf_map_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) kfree(login_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) adapter->login_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) buf_alloc_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) u32 len, u8 map_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) crq.request_map.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) crq.request_map.cmd = REQUEST_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) crq.request_map.map_id = map_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) crq.request_map.ioba = cpu_to_be32(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) crq.request_map.len = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) return ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) crq.request_unmap.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) crq.request_unmap.cmd = REQUEST_UNMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) crq.request_unmap.map_id = map_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) return ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) static void send_query_map(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) crq.query_map.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) crq.query_map.cmd = QUERY_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) /* Send a series of CRQs requesting various capabilities of the VNIC server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) static void send_query_cap(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) int cap_reqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) * upfront. When the tasklet receives a response to all of these, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) * can send out the next protocol message (REQUEST_CAPABILITY).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) cap_reqs = 25;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) atomic_set(&adapter->running_cap_crqs, cap_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) crq.query_capability.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) crq.query_capability.cmd = QUERY_CAPABILITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
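	/* The CRQ built above is reused for every query that follows; only
	 * the capability field changes between sends.
	 */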
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) crq.query_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) crq.query_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) crq.query_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) crq.query_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) crq.query_capability.capability = cpu_to_be16(MIN_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) crq.query_capability.capability = cpu_to_be16(MAX_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) crq.query_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) crq.query_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) crq.query_capability.capability =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) cap_reqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) /* Keep at end to catch any discrepancy between expected and actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) * CRQs sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) WARN_ON(cap_reqs != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)
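/* DMA-map the adapter's IP offload query buffer and ask the server to
 * fill it with the offloads it supports. The buffer is unmapped and
 * parsed in handle_query_ip_offload_rsp().
 */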
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) adapter->ip_offload_tok =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) dma_map_single(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) &adapter->ip_offload_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) if (!firmware_has_feature(FW_FEATURE_CMO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) dev_err(dev, "Couldn't map offload buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) crq.query_ip_offload.len = cpu_to_be32(buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) crq.query_ip_offload.ioba =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) cpu_to_be32(adapter->ip_offload_tok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133)
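/* Tell the server which of the offloads reported by QUERY_IP_OFFLOAD the
 * driver will use, and bring the netdev feature flags in line with what
 * was negotiated. Large receive remains disabled.
 */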
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) netdev_features_t old_hw_features = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) adapter->ip_offload_ctrl_tok =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) dma_map_single(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) ctrl_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) sizeof(adapter->ip_offload_ctrl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) dev_err(dev, "Couldn't map ip offload control buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) /* large_rx disabled for now, additional features needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) ctrl_buf->large_rx_ipv4 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) ctrl_buf->large_rx_ipv6 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (adapter->state != VNIC_PROBING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) old_hw_features = adapter->netdev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) adapter->netdev->hw_features = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) if ((adapter->netdev->features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) adapter->netdev->hw_features |= NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) if (buf->large_tx_ipv4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) adapter->netdev->hw_features |= NETIF_F_TSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) if (buf->large_tx_ipv6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) adapter->netdev->hw_features |= NETIF_F_TSO6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) if (adapter->state == VNIC_PROBING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) adapter->netdev->features |= adapter->netdev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) } else if (old_hw_features != adapter->netdev->hw_features) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) netdev_features_t tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) /* disable features no longer supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) adapter->netdev->features &= adapter->netdev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) /* turn on features now supported if previously enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) tmp = (old_hw_features ^ adapter->netdev->hw_features) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) adapter->netdev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) adapter->netdev->features |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) tmp & adapter->netdev->wanted_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) crq.control_ip_offload.len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212)
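/* Record the VPD buffer length reported by the server and wake the
 * waiter on fw_done so the VPD contents can be fetched next.
 */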
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) if (crq->get_vpd_size_rsp.rc.code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) dev_err(dev, "Error retrieving VPD size, rc=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) crq->get_vpd_size_rsp.rc.code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228)
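/* Extract the firmware level from the returned VPD: the version string
 * follows the ASCII "RM" keyword and is preceded by a one-byte length.
 * Falls back to "N/A" if no usable level is found.
 */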
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) static void handle_vpd_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) unsigned char *substr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) u8 fw_level_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) memset(adapter->fw_version, 0, sizeof(adapter->fw_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) if (crq->get_vpd_rsp.rc.code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) crq->get_vpd_rsp.rc.code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) /* get the position of the firmware version info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) * located after the ASCII 'RM' substring in the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) if (!substr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) /* get length of firmware level ASCII substring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) fw_level_len = *(substr + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) dev_info(dev, "Length of FW substr extends past VPD buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) goto complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) /* copy firmware version string from vpd into adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) if ((substr + 3 + fw_level_len) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) (adapter->vpd->buff + adapter->vpd->len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) dev_info(dev, "FW substr extends past VPD buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) complete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) if (adapter->fw_version[0] == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) strncpy((char *)adapter->fw_version, "N/A", 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277)
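/* Unmap and dump the offload query results, then reply with the set of
 * offloads the driver will actually use via send_control_ip_offload().
 */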
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) dma_unmap_single(dev, adapter->ip_offload_tok,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) netdev_dbg(adapter->netdev, "%016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) ((unsigned long *)(buf))[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) buf->tcp_ipv4_chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) buf->tcp_ipv6_chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) buf->udp_ipv4_chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) buf->udp_ipv6_chksum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) buf->large_tx_ipv4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) buf->large_tx_ipv6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) buf->large_rx_ipv4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) buf->large_rx_ipv6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) buf->max_ipv4_header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) buf->max_ipv6_header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) buf->max_tcp_header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) buf->max_udp_header_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) buf->max_large_tx_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) buf->max_large_rx_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) buf->ipv6_extension_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) buf->tcp_pseudosum_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) buf->num_ipv6_ext_headers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) buf->off_ipv6_ext_headers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) send_control_ip_offload(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) static const char *ibmvnic_fw_err_cause(u16 cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) switch (cause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) case ADAPTER_PROBLEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) return "adapter problem";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) case BUS_PROBLEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) return "bus problem";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) case FW_PROBLEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) return "firmware problem";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) case DD_PROBLEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) return "device driver problem";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) case EEH_RECOVERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) return "EEH recovery";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) case FW_UPDATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) return "firmware updated";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) case LOW_MEMORY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) return "low memory";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355)
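/* A firmware error indication always triggers a reset; only the reset
 * severity (fatal vs. non-fatal) depends on the flags in the CRQ.
 */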
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) static void handle_error_indication(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) u16 cause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) cause = be16_to_cpu(crq->error_indication.error_cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) dev_warn_ratelimited(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) "Firmware reports %serror, cause: %s. Starting recovery...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) crq->error_indication.flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) ibmvnic_fw_err_cause(cause));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) ibmvnic_reset(adapter, VNIC_RESET_FATAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375)
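/* The server may grant a MAC address other than the one requested, so
 * the address is copied from the response rather than the request.
 */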
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) static int handle_change_mac_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) rc = crq->change_mac_addr_rsp.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) /* crq->change_mac_addr.mac_addr is the requested one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) ether_addr_copy(netdev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) &crq->change_mac_addr_rsp.mac_addr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) ether_addr_copy(adapter->mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) &crq->change_mac_addr_rsp.mac_addr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399)
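/* Handle one REQUEST_CAPABILITY response. On PARTIALSUCCESS the server
 * reports a value it can satisfy; adopt it (or revert to the fallback
 * MTU) and retry. Once all outstanding responses have arrived, move on
 * to querying IP offload support.
 */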
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) static void handle_request_cap_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) u64 *req_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) atomic_dec(&adapter->running_cap_crqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) atomic_read(&adapter->running_cap_crqs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) case REQ_TX_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) req_value = &adapter->req_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) name = "tx";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) case REQ_RX_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) req_value = &adapter->req_rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) name = "rx";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) case REQ_RX_ADD_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) req_value = &adapter->req_rx_add_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) name = "rx_add";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) case REQ_TX_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) req_value = &adapter->req_tx_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) name = "tx_entries_per_subcrq";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) req_value = &adapter->req_rx_add_entries_per_subcrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) name = "rx_add_entries_per_subcrq";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) case REQ_MTU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) req_value = &adapter->req_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) name = "mtu";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) case PROMISC_REQUESTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) req_value = &adapter->promisc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) name = "promisc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) dev_err(dev, "Got invalid cap request rsp %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) be16_to_cpu(crq->request_capability_rsp.capability));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) switch (crq->request_capability_rsp.rc.code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) case SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) case PARTIALSUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) *req_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) (long)be64_to_cpu(crq->request_capability_rsp.number),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) if (be16_to_cpu(crq->request_capability_rsp.capability) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) REQ_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) dev_err(dev, "mtu of %llu is not supported. Reverting.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) *req_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) *req_value = adapter->fallback.mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) *req_value =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) be64_to_cpu(crq->request_capability_rsp.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) send_request_cap(adapter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) dev_err(dev, "Error %d in request cap rsp\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) crq->request_capability_rsp.rc.code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) /* Done receiving requested capabilities, query IP offload support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) if (atomic_read(&adapter->running_cap_crqs) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) adapter->wait_capability = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) send_query_ip_offload(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478)
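/* Complete the login handshake: validate the response against the login
 * request, cache the per-queue sub-CRQ handles assigned by the server,
 * and wake the waiter on init_done.
 */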
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) struct ibmvnic_login_buffer *login = adapter->login_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) u64 *tx_handle_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) u64 *rx_handle_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) int num_tx_pools;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) int num_rx_pools;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) u64 *size_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) /* CHECK: Test/set of login_pending does not need to be atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) * because only ibmvnic_tasklet tests/clears this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) if (!adapter->login_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) netdev_warn(netdev, "Ignoring unexpected login response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) adapter->login_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) dma_unmap_single(dev, adapter->login_rsp_buf_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) /* If the number of queues requested can't be allocated by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) * server, the login response will return with code 1. We will need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) * to resend the login buffer with fewer queues requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) if (login_rsp_crq->generic.rc.code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) adapter->init_done_rc = login_rsp_crq->generic.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) complete(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) if (adapter->failover_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) adapter->init_done_rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) netdev_dbg(netdev, "Failover pending, ignoring login response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) complete(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) /* login response buffer will be released on reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) netdev->mtu = adapter->req_mtu - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) netdev_dbg(adapter->netdev, "%016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) ((unsigned long *)(adapter->login_rsp_buf))[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) /* Sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) (be32_to_cpu(login->num_rxcomp_subcrqs) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) adapter->req_rx_add_queues !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) ibmvnic_reset(adapter, VNIC_RESET_FATAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) }
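	/* The off_* fields in the login response are byte offsets from the
	 * start of the response buffer to arrays of big-endian u64 values.
	 */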
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) /* variable buffer sizes are not supported, so just read the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) * first entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) for (i = 0; i < num_tx_pools; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) adapter->tx_scrq[i]->handle = tx_handle_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) for (i = 0; i < num_rx_pools; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) adapter->rx_scrq[i]->handle = rx_handle_array[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) adapter->num_active_tx_scrqs = num_tx_pools;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) adapter->num_active_rx_scrqs = num_rx_pools;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) release_login_rsp_buffer(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) release_login_buffer(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) complete(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) rc = crq->request_unmap_rsp.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) static void handle_query_map_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) rc = crq->query_map_rsp.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) crq->query_map_rsp.free_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599)
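/* Store one queried capability in the adapter. Once all outstanding
 * QUERY_CAPABILITY responses have arrived, send the REQUEST_CAPABILITY
 * series based on the min/max values learned here.
 */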
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) static void handle_query_cap_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) atomic_dec(&adapter->running_cap_crqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) netdev_dbg(netdev, "Outstanding queries: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) atomic_read(&adapter->running_cap_crqs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) rc = crq->query_capability.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) switch (be16_to_cpu(crq->query_capability.capability)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) case MIN_TX_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) adapter->min_tx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) netdev_dbg(netdev, "min_tx_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) adapter->min_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) case MIN_RX_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) adapter->min_rx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) netdev_dbg(netdev, "min_rx_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) adapter->min_rx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) case MIN_RX_ADD_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) adapter->min_rx_add_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) adapter->min_rx_add_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) case MAX_TX_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) adapter->max_tx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) netdev_dbg(netdev, "max_tx_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) adapter->max_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) case MAX_RX_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) adapter->max_rx_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) netdev_dbg(netdev, "max_rx_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) adapter->max_rx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) case MAX_RX_ADD_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) adapter->max_rx_add_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) adapter->max_rx_add_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) case MIN_TX_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) adapter->min_tx_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) adapter->min_tx_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) adapter->min_rx_add_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) adapter->min_rx_add_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) case MAX_TX_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) adapter->max_tx_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) adapter->max_tx_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) adapter->max_rx_add_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) adapter->max_rx_add_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) case TCP_IP_OFFLOAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) adapter->tcp_ip_offload =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) adapter->tcp_ip_offload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) case PROMISC_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) adapter->promisc_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) netdev_dbg(netdev, "promisc_supported = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) adapter->promisc_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) case MIN_MTU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) case MAX_MTU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) case MAX_MULTICAST_FILTERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) adapter->max_multicast_filters =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) netdev_dbg(netdev, "max_multicast_filters = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) adapter->max_multicast_filters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) case VLAN_HEADER_INSERTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) adapter->vlan_header_insertion =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) if (adapter->vlan_header_insertion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) adapter->vlan_header_insertion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) case RX_VLAN_HEADER_INSERTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) adapter->rx_vlan_header_insertion =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) adapter->rx_vlan_header_insertion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) case MAX_TX_SG_ENTRIES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) adapter->max_tx_sg_entries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) adapter->max_tx_sg_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) case RX_SG_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) adapter->rx_sg_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) netdev_dbg(netdev, "rx_sg_supported = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) adapter->rx_sg_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) case OPT_TX_COMP_SUB_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) adapter->opt_tx_comp_sub_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) adapter->opt_tx_comp_sub_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) case OPT_RX_COMP_QUEUES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) adapter->opt_rx_comp_queues =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) adapter->opt_rx_comp_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) adapter->opt_rx_bufadd_q_per_rx_comp_q =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) adapter->opt_rx_bufadd_q_per_rx_comp_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) case OPT_TX_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) adapter->opt_tx_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) adapter->opt_tx_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) case OPT_RXBA_ENTRIES_PER_SUBCRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) adapter->opt_rxba_entries_per_subcrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) be64_to_cpu(crq->query_capability.number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) adapter->opt_rxba_entries_per_subcrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) case TX_RX_DESC_REQ:
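		/* Deliberately not byte-swapped: the value is kept in
		 * wire (big-endian) format, presumably so it can be
		 * handed back to the firmware as-is, which is also why
		 * it is printed with %llx rather than %lld.
		 */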
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) adapter->tx_rx_desc_req = crq->query_capability.number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) adapter->tx_rx_desc_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) netdev_err(netdev, "Got invalid cap rsp %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) crq->query_capability.capability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) if (atomic_read(&adapter->running_cap_crqs) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) adapter->wait_capability = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) send_request_cap(adapter, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778)
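/* Query the backing device's physical link parameters (speed/duplex)
 * over the CRQ. Serialized with other firmware commands by fw_lock;
 * the caller sleeps on the fw_done completion until the response is
 * handled or the wait times out.
 */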
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) union ibmvnic_crq crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) memset(&crq, 0, sizeof(crq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) mutex_lock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) adapter->fw_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) reinit_completion(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) rc = ibmvnic_send_crq(adapter, &crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) mutex_unlock(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) return adapter->fw_done_rc ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807)
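/* Translate a QUERY_PHYS_PARMS response into the ethtool SPEED_* and
 * DUPLEX_* values cached on the adapter. An unrecognized speed is only
 * warned about while the carrier is up, to avoid log noise when the
 * link is down.
 */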
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) int rc;
	u32 rspeed = be32_to_cpu(crq->query_phys_parms_rsp.speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) rc = crq->query_phys_parms_rsp.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) switch (rspeed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) case IBMVNIC_10MBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) adapter->speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) case IBMVNIC_100MBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) adapter->speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) case IBMVNIC_1GBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) adapter->speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) case IBMVNIC_10GBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) adapter->speed = SPEED_10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) case IBMVNIC_25GBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) adapter->speed = SPEED_25000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) case IBMVNIC_40GBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) adapter->speed = SPEED_40000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) case IBMVNIC_50GBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) adapter->speed = SPEED_50000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) case IBMVNIC_100GBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) adapter->speed = SPEED_100000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) case IBMVNIC_200GBPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) adapter->speed = SPEED_200000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) if (netif_carrier_ok(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) adapter->speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) adapter->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) adapter->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) adapter->duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
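/* Dispatch a single CRQ message. The first byte distinguishes
 * transport-level traffic (the init handshake and XPORT events such as
 * migration or failover) from command responses, which are then
 * dispatched on the cmd byte. Runs in tasklet context under the CRQ
 * lock, so nothing here may sleep.
 */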
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) struct ibmvnic_generic_crq *gen_crq = &crq->generic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) struct net_device *netdev = adapter->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) u64 *u64_crq = (u64 *)crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)
	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)be64_to_cpu(u64_crq[0]),
		   (unsigned long)be64_to_cpu(u64_crq[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) switch (gen_crq->first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) case IBMVNIC_CRQ_INIT_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) switch (gen_crq->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) case IBMVNIC_CRQ_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) dev_info(dev, "Partner initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) adapter->from_passive_init = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) /* Discard any stale login responses from prev reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) * CHECK: should we clear even on INIT_COMPLETE?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) adapter->login_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885)
			if (!completion_done(&adapter->init_done)) {
				adapter->init_done_rc = -EIO;
				complete(&adapter->init_done);
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) if (rc && rc != -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) /* We were unable to schedule the failover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) * reset either because the adapter was still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) * probing (eg: during kexec) or we could not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) * allocate memory. Clear the failover_pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) * flag since no one else will. We ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) * EBUSY because it means either FAILOVER reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) * is already scheduled or the adapter is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) * being removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) netdev_err(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) "Error %ld scheduling failover reset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) adapter->failover_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) case IBMVNIC_CRQ_INIT_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) dev_info(dev, "Partner initialization complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) adapter->crq.active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) send_version_xchg(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) case IBMVNIC_CRQ_XPORT_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) adapter->crq.active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) /* terminate any thread waiting for a response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) * from the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) if (!completion_done(&adapter->fw_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) adapter->fw_done_rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) /* if we got here during crq-init, retry crq-init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) if (!completion_done(&adapter->init_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) adapter->init_done_rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) complete(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) if (!completion_done(&adapter->stats_done))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) complete(&adapter->stats_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) if (test_bit(0, &adapter->resetting))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) adapter->force_reset_recovery = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) dev_info(dev, "Migrated, re-enabling adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) dev_info(dev, "Backing device failover detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) adapter->failover_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) /* The adapter lost the connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) gen_crq->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) ibmvnic_reset(adapter, VNIC_RESET_FATAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) case IBMVNIC_CRQ_CMD_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) dev_err(dev, "Got an invalid msg type 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) gen_crq->first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) switch (gen_crq->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) case VERSION_EXCHANGE_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) rc = crq->version_exchange_rsp.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) ibmvnic_version =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) be16_to_cpu(crq->version_exchange_rsp.version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) dev_info(dev, "Partner protocol version is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) ibmvnic_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) send_query_cap(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) case QUERY_CAPABILITY_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) handle_query_cap_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) case QUERY_MAP_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) handle_query_map_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) case REQUEST_MAP_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) adapter->fw_done_rc = crq->request_map_rsp.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) case REQUEST_UNMAP_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) handle_request_unmap_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) case REQUEST_CAPABILITY_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) handle_request_cap_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) case LOGIN_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) netdev_dbg(netdev, "Got Login Response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) handle_login_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) case LOGICAL_LINK_STATE_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) netdev_dbg(netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) "Got Logical Link State Response, state: %d rc: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) crq->logical_link_state_rsp.link_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) crq->logical_link_state_rsp.rc.code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) adapter->logical_link_state =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) crq->logical_link_state_rsp.link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) complete(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) case LINK_STATE_INDICATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) netdev_dbg(netdev, "Got Logical Link State Indication\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) adapter->phys_link_state =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) crq->link_state_indication.phys_link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) adapter->logical_link_state =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) crq->link_state_indication.logical_link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) if (adapter->phys_link_state && adapter->logical_link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) netif_carrier_on(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) case CHANGE_MAC_ADDR_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) netdev_dbg(netdev, "Got MAC address change Response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) case ERROR_INDICATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) netdev_dbg(netdev, "Got Error Indication\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) handle_error_indication(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) case REQUEST_STATISTICS_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) netdev_dbg(netdev, "Got Statistics Response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) complete(&adapter->stats_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) case QUERY_IP_OFFLOAD_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) netdev_dbg(netdev, "Got Query IP offload Response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) handle_query_ip_offload_rsp(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) case MULTICAST_CTRL_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) netdev_dbg(netdev, "Got multicast control Response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) case CONTROL_IP_OFFLOAD_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) netdev_dbg(netdev, "Got Control IP offload Response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) sizeof(adapter->ip_offload_ctrl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) complete(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) case COLLECT_FW_TRACE_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) netdev_dbg(netdev, "Got Collect firmware trace Response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) case GET_VPD_SIZE_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) handle_vpd_size_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) case GET_VPD_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) handle_vpd_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) case QUERY_PHYS_PARMS_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) complete(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) gen_crq->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057)
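/* Hard IRQ handler for the CRQ interrupt: all real work is deferred to
 * the tasklet.
 */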
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) struct ibmvnic_adapter *adapter = instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) tasklet_schedule(&adapter->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065)
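/* Bottom half of the CRQ interrupt: drains and dispatches every valid
 * message on the queue while holding the queue lock with interrupts
 * disabled.
 */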
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) static void ibmvnic_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) struct ibmvnic_crq_queue *queue = &adapter->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) union ibmvnic_crq *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			/* This barrier makes sure ibmvnic_next_crq()'s
			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
			 * before ibmvnic_handle_crq()'s
			 * switch(gen_crq->first) and switch(gen_crq->cmd).
			 */
			dma_rmb();
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* Remain in the tasklet until all capability responses
		 * are received; without this exit condition the loop
		 * would spin forever with the queue lock held.
		 */
		if (!adapter->wait_capability)
			done = true;
	}

	/* If capability CRQs were sent in this tasklet, the next tasklet
	 * run must wait until all responses are received.
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) spin_unlock_irqrestore(&queue->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095)
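/* Ask the hypervisor to re-enable a disabled CRQ, retrying while the
 * hcall reports a busy condition.
 */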
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) struct vio_dev *vdev = adapter->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110)
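/* Free and immediately re-register the CRQ with the hypervisor, reusing
 * the existing DMA page. This recovers the queue in place, e.g. when a
 * kexec has left a stale registration behind.
 */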
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) struct ibmvnic_crq_queue *crq = &adapter->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) struct vio_dev *vdev = adapter->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) /* Close the CRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) /* Clean out the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) if (!crq->msgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) memset(crq->msgs, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) crq->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) crq->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) /* And re-open it again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) crq->msg_token, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) if (rc == H_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) /* Adapter is good, but other end is not ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) dev_warn(dev, "Partner adapter not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) else if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143)
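/* Full CRQ teardown: release the IRQ, stop the tasklet, free the queue
 * with the hypervisor, then unmap and free the backing page.
 */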
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) static void release_crq_queue(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) struct ibmvnic_crq_queue *crq = &adapter->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) struct vio_dev *vdev = adapter->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) if (!crq->msgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) netdev_dbg(adapter->netdev, "Releasing CRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) free_irq(vdev->irq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) tasklet_kill(&adapter->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) free_page((unsigned long)crq->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) crq->msgs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) crq->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166)
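/* Allocate and register the CRQ: a single DMA-mapped page of message
 * slots registered via H_REG_CRQ, plus the IRQ and tasklet that service
 * it. H_RESOURCE usually means a previous (e.g. kexec'd) registration
 * still exists, so a reset of the queue is attempted before failing.
 */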
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) static int init_crq_queue(struct ibmvnic_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) struct ibmvnic_crq_queue *crq = &adapter->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) struct vio_dev *vdev = adapter->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) int rc, retrc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) if (crq->msgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) /* Should we allocate more than one page? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) if (!crq->msgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) crq->size = PAGE_SIZE / sizeof(*crq->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) if (dma_mapping_error(dev, crq->msg_token))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) goto map_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) crq->msg_token, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) if (rc == H_RESOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) /* maybe kexecing and resource is busy. try a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) rc = ibmvnic_reset_crq(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) retrc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) if (rc == H_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) dev_warn(dev, "Partner adapter not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) } else if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) dev_warn(dev, "Error %d opening adapter\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) goto reg_crq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) retrc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205)
	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) adapter->vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) vdev->irq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) goto req_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) rc = vio_enable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) dev_err(dev, "Error %d enabling interrupts\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) goto req_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) crq->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) spin_lock_init(&crq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) /* process any CRQs that were queued before we enabled interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) tasklet_schedule(&adapter->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) return retrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) req_irq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) tasklet_kill(&adapter->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) reg_crq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) map_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) free_page((unsigned long)crq->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) crq->msgs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) return retrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244)
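/* Run the CRQ init handshake with the VNIC server, then build the
 * sub-CRQs. During a non-mobility reset the existing sub-CRQs are
 * simply reset if the requested queue counts are unchanged, and are
 * released and reallocated otherwise.
 */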
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) struct device *dev = &adapter->vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) unsigned long timeout = msecs_to_jiffies(20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) u64 old_num_rx_queues = adapter->req_rx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) u64 old_num_tx_queues = adapter->req_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) adapter->from_passive_init = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) if (reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) reinit_completion(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) adapter->init_done_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) rc = ibmvnic_send_crq_init(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) dev_err(dev, "Send crq init failed with error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) dev_err(dev, "Initialization sequence timed out\n");
		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) if (adapter->init_done_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) return adapter->init_done_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) if (adapter->from_passive_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) adapter->state = VNIC_OPEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) adapter->from_passive_init = false;
		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) if (reset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) adapter->reset_reason != VNIC_RESET_MOBILITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) if (adapter->req_rx_queues != old_num_rx_queues ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) adapter->req_tx_queues != old_num_tx_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) release_sub_crqs(adapter, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) rc = init_sub_crqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) rc = reset_sub_crq_queues(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) rc = init_sub_crqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) dev_err(dev, "Initialization of sub crqs failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) rc = init_sub_crq_irqs(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) dev_err(dev, "Failed to initialize sub crq irqs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) static struct device_attribute dev_attr_failover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311)
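/* Probe: read the MAC address from the VETH_MAC_ADDR device-tree
 * attribute, allocate the net_device, bring up the CRQ and repeat the
 * init handshake while it reports -EAGAIN, then register the netdev.
 * The carrier stays off until a link state indication arrives.
 */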
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) struct ibmvnic_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) unsigned char *mac_addr_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) dev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) mac_addr_p = (unsigned char *)vio_get_attribute(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) VETH_MAC_ADDR, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) if (!mac_addr_p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) dev_err(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) __FILE__, __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) IBMVNIC_MAX_QUEUES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) if (!netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) adapter->state = VNIC_PROBING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) dev_set_drvdata(&dev->dev, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) adapter->vdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) adapter->netdev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) adapter->login_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) ether_addr_copy(adapter->mac_addr, mac_addr_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) netdev->irq = dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) netdev->netdev_ops = &ibmvnic_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) netdev->ethtool_ops = &ibmvnic_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) SET_NETDEV_DEV(netdev, &dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) spin_lock_init(&adapter->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) __ibmvnic_delayed_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) INIT_LIST_HEAD(&adapter->rwi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) spin_lock_init(&adapter->rwi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) spin_lock_init(&adapter->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) mutex_init(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) init_completion(&adapter->init_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) init_completion(&adapter->fw_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) init_completion(&adapter->reset_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) init_completion(&adapter->stats_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) clear_bit(0, &adapter->resetting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) rc = init_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) goto ibmvnic_init_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != -EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) rc = init_stats_buffers(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) goto ibmvnic_init_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) rc = init_stats_token(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) goto ibmvnic_stats_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) netdev->mtu = adapter->req_mtu - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) rc = device_create_file(&dev->dev, &dev_attr_failover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) goto ibmvnic_dev_file_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) adapter->state = VNIC_PROBED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) adapter->wait_for_reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) adapter->last_reset_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) rc = register_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) goto ibmvnic_register_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) dev_info(&dev->dev, "ibmvnic registered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) ibmvnic_register_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) device_remove_file(&dev->dev, &dev_attr_failover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) ibmvnic_dev_file_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) release_stats_token(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) ibmvnic_stats_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) release_stats_buffers(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) ibmvnic_init_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) release_sub_crqs(adapter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) mutex_destroy(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427)
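/* Remove: move to VNIC_REMOVING under the state and rwi locks so no new
 * resets can be scheduled, flush any reset work already queued, then
 * unregister the netdev and release every queue, buffer and token.
 */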
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) static int ibmvnic_remove(struct vio_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) struct net_device *netdev = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) spin_lock_irqsave(&adapter->state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) /* If ibmvnic_reset() is scheduling a reset, wait for it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) * finish. Then, set the state to REMOVING to prevent it from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) * scheduling any more work and to have reset functions ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) * any resets that have already been scheduled. Drop the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) * after setting state, so __ibmvnic_reset() which is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) * from the flush_work() below, can make progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) spin_lock(&adapter->rwi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) adapter->state = VNIC_REMOVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) spin_unlock(&adapter->rwi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) spin_unlock_irqrestore(&adapter->state_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) flush_work(&adapter->ibmvnic_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) flush_delayed_work(&adapter->ibmvnic_delayed_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) unregister_netdevice(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) release_sub_crqs(adapter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) release_crq_queue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) release_stats_token(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) release_stats_buffers(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) adapter->state = VNIC_REMOVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) mutex_destroy(&adapter->fw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) device_remove_file(&dev->dev, &dev_attr_failover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) dev_set_drvdata(&dev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472)
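/* sysfs "failover" attribute: writing "1" fetches this session's token
 * via H_VIOCTL/H_GET_SESSION_TOKEN and then signals
 * H_SESSION_ERR_DETECTED with it, asking the hypervisor to initiate a
 * client failover.
 */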
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) struct net_device *netdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) __be64 session_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) if (!sysfs_streq(buf, "1"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) H_GET_SESSION_TOKEN, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) session_token = (__be64)retbuf[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) be64_to_cpu(session_token));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) H_SESSION_ERR_DETECTED, session_token, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) static DEVICE_ATTR_WO(failover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508)
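/* Estimate the IO entitlement (IOMMU mapping space) the device wants:
 * the CRQ page, the statistics buffer, four pages per sub-CRQ and all
 * long-term-mapped RX pool buffers. Before probe has populated the
 * adapter, a fixed default is returned.
 */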
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) struct net_device *netdev = dev_get_drvdata(&vdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) struct ibmvnic_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) struct iommu_table *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) unsigned long ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) tbl = get_iommu_table_base(&vdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) /* netdev inits at probe time along with the structures we need below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) if (!netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) ret += PAGE_SIZE; /* the CRQ message queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) ret += 4 * PAGE_SIZE; /* each sub-CRQ message queue is 4 pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) for (i = 0; i < adapter->num_active_rx_pools; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) ret += adapter->rx_pool[i].size *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537)
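/* PM resume callback: if the adapter was open when the partition was
 * suspended, schedule the CRQ tasklet so that any messages which arrived
 * while suspended are drained; otherwise there is nothing to do.
 */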
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) static int ibmvnic_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) struct net_device *netdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) struct ibmvnic_adapter *adapter = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) if (adapter->state != VNIC_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) tasklet_schedule(&adapter->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550)
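/* Bind to VIO devices of type "network" with a compatible property of
 * "IBM,vnic"; the empty entry terminates the table.
 */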
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) static const struct vio_device_id ibmvnic_device_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) {"network", "IBM,vnic"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) {"", ""}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556)
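/* Only a resume handler is populated; the driver defines no suspend-side
 * callback.
 */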
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) static const struct dev_pm_ops ibmvnic_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) .resume = ibmvnic_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) static struct vio_driver ibmvnic_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) .id_table = ibmvnic_device_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) .probe = ibmvnic_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) .remove = ibmvnic_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) .get_desired_dma = ibmvnic_get_desired_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) .name = ibmvnic_driver_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) .pm = &ibmvnic_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) /* module functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) static int __init ibmvnic_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) IBMVNIC_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) return vio_register_driver(&ibmvnic_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) static void __exit ibmvnic_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) vio_unregister_driver(&ibmvnic_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) module_init(ibmvnic_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) module_exit(ibmvnic_module_exit);