// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
 *
 *	(C)Copyright 1998,1999 SysKonnect,
 *	a business unit of Schneider & Koch & Co. Datensysteme GmbH.
 *
 *	See the file "skfddi.c" for further information.
 *
 *	The information in this file is provided "AS IS" without warranty.
 *
 ******************************************************************************/

#define	HWMTM

#ifndef	FDDI
#define	FDDI
#endif

#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"

/*
	-------------------------------------------------------------
	DOCUMENTATION
	-------------------------------------------------------------
	BEGIN_MANUAL_ENTRY(DOCUMENTATION)

			T B D

	END_MANUAL_ENTRY
*/
/*
	-------------------------------------------------------------
	LOCAL VARIABLES:
	-------------------------------------------------------------
*/
#ifdef COMMON_MB_POOL
static	SMbuf *mb_start = 0 ;
static	SMbuf *mb_free = 0 ;
static	int mb_init = FALSE ;
static	int call_count = 0 ;
#endif
/*
	-------------------------------------------------------------
	EXTERNAL VARIABLES:
	-------------------------------------------------------------
*/

#ifdef DEBUG
#ifndef DEBUG_BRD
extern struct smt_debug debug ;
#endif
#endif

#ifdef NDIS_OS2
extern u_char offDepth ;
extern u_char force_irq_pending ;
#endif

/*
	-------------------------------------------------------------
	LOCAL FUNCTIONS:
	-------------------------------------------------------------
*/

static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
static void init_txd_ring(struct s_smc *smc);
static void init_rxd_ring(struct s_smc *smc);
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count);
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
static SMbuf* get_llc_rx(struct s_smc *smc);
static SMbuf* get_txd_mb(struct s_smc *smc);
static void mac_drv_clear_txd(struct s_smc *smc);

/*
	-------------------------------------------------------------
	EXTERNAL FUNCTIONS:
	-------------------------------------------------------------
*/
/*	The external SMT functions are listed in cmtdef.h */

extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
extern void mac_drv_fill_rxd(struct s_smc *smc);
extern void mac_drv_tx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_txd *txd);
extern void mac_drv_rx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count, int len);
extern void mac_drv_requeue_rxd(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count);
extern void mac_drv_clear_rxd(struct s_smc *smc,
			      volatile struct s_smt_fp_rxd *rxd, int frag_count);

#ifdef USE_OS_CPY
extern void hwm_cpy_rxd2mb(void);
extern void hwm_cpy_txd2mb(void);
#endif

#ifdef ALL_RX_COMPLETE
extern void mac_drv_all_receives_complete(void);
#endif

extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);

#ifdef NDIS_OS2
extern void post_proc(void);
#else
extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
			 int flag);
#endif

extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
			   int la_len);

/*
	-------------------------------------------------------------
	PUBLIC FUNCTIONS:
	-------------------------------------------------------------
*/
void process_receive(struct s_smc *smc);
void fddi_isr(struct s_smc *smc);
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
void init_driver_fplus(struct s_smc *smc);
void mac_drv_rx_mode(struct s_smc *smc, int mode);
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void mac_drv_clear_tx_queue(struct s_smc *smc);
void mac_drv_clear_rx_queue(struct s_smc *smc);
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);

int mac_drv_init(struct s_smc *smc);
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status);

u_int mac_drv_check_space(void);

SMbuf* smt_get_mbuf(struct s_smc *smc);

#ifdef DEBUG
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
#endif

/*
	-------------------------------------------------------------
	MACROS:
	-------------------------------------------------------------
*/
#ifndef	UNUSED
#ifdef	lint
#define UNUSED(x)	(x) = (x)
#else
#define UNUSED(x)
#endif
#endif

#ifdef	USE_CAN_ADDR
#define MA		smc->hw.fddi_canon_addr.a
#define GROUP_ADDR_BIT	0x01
#else
#define MA		smc->hw.fddi_home_addr.a
#define GROUP_ADDR_BIT	0x80
#endif

#define RXD_TXD_COUNT	(HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
			SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)

#ifdef	MB_OUTSIDE_SMC
#define EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
			MAX_MBUF*sizeof(SMbuf))
#define EXT_VIRT_MEM_2	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#else
#define EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#endif
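
/*
 * Note: the "+1" descriptor in EXT_VIRT_MEM is presumably slack for the
 * 16-byte alignment fix-up done in init_fddi_driver(); one extra TxD
 * (always a multiple of 16 bytes, see the checks in mac_drv_init())
 * covers the at most 15 bytes lost when the descriptor pointer is
 * rounded up to the next 16-byte boundary.
 */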

/*
 * define critical read for 16-bit drivers
 */
#if defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var)	((var) & 0xffff0000 | ((var) & 0xffff))
#else
#define CR_READ(var)	(__le32)(var)
#endif
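
/*
 * A sketch of why CR_READ exists (our reading, not from the original
 * sources): on the 16-bit platforms (NDIS_OS2/ODI2) a 32-bit load may
 * be split by the compiler into two 16-bit accesses, so the macro
 * evaluates the variable once per half-word mask and recombines the
 * two halves into the 32-bit value, e.g.
 *
 *	u_long ctrl = CR_READ(descr->r.rxd_rbctrl) ;
 *
 * On 32-bit platforms it degenerates to a plain (endianness-annotated)
 * read.
 */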

#define IMASK_SLOW	(IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
			 IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
			 IS_R1_C | IS_XA_C | IS_XS_C)

/*
	-------------------------------------------------------------
	INIT- AND SMT FUNCTIONS:
	-------------------------------------------------------------
*/


/*
 * BEGIN_MANUAL_ENTRY(mac_drv_check_space)
 *	u_int mac_drv_check_space()
 *
 * function	DOWNCALL	(drvsr.c)
 *		This function calculates the non-virtual memory needed by
 *		the driver for MBufs, RxD and TxD descriptors, etc.
 *
 * return	u_int	memory in bytes
 *
 * END_MANUAL_ENTRY
 */
u_int mac_drv_check_space(void)
{
#ifdef	MB_OUTSIDE_SMC
#ifdef	COMMON_MB_POOL
	call_count++ ;
	if (call_count == 1) {
		return EXT_VIRT_MEM;
	}
	else {
		return EXT_VIRT_MEM_2;
	}
#else
	return EXT_VIRT_MEM;
#endif
#else
	return 0;
#endif
}
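
/*
 * Usage sketch (hypothetical caller; the real downcall is made by the
 * OS-specific module, see drvsr.c). The OS module is expected to
 * reserve the returned number of bytes before calling mac_drv_init():
 *
 *	u_int bytes = mac_drv_check_space() ;
 *	void *pool = os_alloc(bytes) ;	(os_alloc is illustrative only)
 *
 * With COMMON_MB_POOL only the first call accounts for the shared MBuf
 * pool; subsequent calls return the smaller EXT_VIRT_MEM_2.
 */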

/*
 * BEGIN_MANUAL_ENTRY(mac_drv_init)
 *	void mac_drv_init(smc)
 *
 * function	DOWNCALL	(drvsr.c)
 *		In this function the hardware module allocates its
 *		memory.
 *		The operating system dependent module should call
 *		mac_drv_init once, after the adapter is detected.
 * END_MANUAL_ENTRY
 */
int mac_drv_init(struct s_smc *smc)
{
	if (sizeof(struct s_smt_fp_rxd) % 16) {
		SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
	}
	if (sizeof(struct s_smt_fp_txd) % 16) {
		SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
	}

	/*
	 * get the required memory for the RxDs and TxDs
	 */
	if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
		mac_drv_get_desc_mem(smc,(u_int)
		(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
		return 1;	/* no space; the hwm module can't work */
	}

	/*
	 * get the memory for the SMT MBufs
	 */
#ifndef	MB_OUTSIDE_SMC
	smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef	COMMON_MB_POOL
	if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
		MAX_MBUF*sizeof(SMbuf)))) {
		return 1;	/* no space; the hwm module can't work */
	}
#else
	if (!mb_start) {
		if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
			MAX_MBUF*sizeof(SMbuf)))) {
			return 1;	/* no space; the hwm module can't work */
		}
	}
#endif
#endif
	return 0;
}

/*
 * BEGIN_MANUAL_ENTRY(init_driver_fplus)
 *	init_driver_fplus(smc)
 *
 * Sets hardware module specific values for the mode register 2
 * (e.g. the byte alignment for the received frames, the position of the
 *	least significant byte etc.)
 * END_MANUAL_ENTRY
 */
void init_driver_fplus(struct s_smc *smc)
{
	smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef	PCI
	smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
	smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef	USE_CAN_ADDR
	/* enable address bit swapping */
	smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}

static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count)
{
	int i ;
	union s_fp_descr volatile *d1 ;
	union s_fp_descr volatile *d2 ;
	u_long	phys ;

	DB_GEN(3, "descr ring starts at = %p", start);
	for (i=count-1, d1=start; i ; i--) {
		d2 = d1 ;
		d1++ ;		/* descr is owned by the host */
		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
		d2->r.rxd_next = &d1->r ;
		phys = mac_drv_virt2phys(smc,(void *)d1) ;
		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
	}
	DB_GEN(3, "descr ring ends at = %p", d1);
	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
	d1->r.rxd_next = &start->r ;
	phys = mac_drv_virt2phys(smc,(void *)start) ;
	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;

	for (i=count, d1=start; i ; i--) {
		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
		d1++;
	}
	return phys;
}
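
/*
 * Resulting layout (informal sketch): each descriptor carries a virtual
 * link (rxd_next) and the little-endian physical address of its
 * successor (rxd_nrdadr); the last entry links back to the first, so
 * host and BMU both see one circular ring:
 *
 *	start -> d[1] -> ... -> d[count-1] --+
 *	  ^                                  |
 *	  +----------------------------------+
 *
 * The returned value is the physical address of 'start', i.e. of the
 * descriptor the last ring entry points to.
 */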

static void init_txd_ring(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *ds ;
	struct s_smt_tx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the transmit descriptors
	 */
	ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
		SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_ASYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	ds-- ;
	queue->tx_free = HWM_ASYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XA_DA),phys) ;

	ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
		HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
	queue = smc->hw.fp.tx[QUEUE_S] ;
	DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_SYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	queue->tx_free = HWM_SYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XS_DA),phys) ;
}

static void init_rxd_ring(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *ds ;
	struct s_smt_rx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the receive descriptors
	 */
	ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		SMT_R1_RXD_COUNT) ;
	phys = le32_to_cpu(ds->rxd_nrdadr) ;
	ds++ ;
	queue->rx_curr_put = queue->rx_curr_get = ds ;
	queue->rx_free = SMT_R1_RXD_COUNT ;
	queue->rx_used = 0 ;
	outpd(ADDR(B4_R1_DA),phys) ;
}

/*
 * BEGIN_MANUAL_ENTRY(init_fddi_driver)
 *	void init_fddi_driver(smc,mac_addr)
 *
 * initializes the driver and its variables
 *
 * END_MANUAL_ENTRY
 */
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
	SMbuf	*mb ;
	int	i ;

	init_board(smc,mac_addr) ;
	(void)init_fplus(smc) ;

	/*
	 * initialize the SMbufs for the SMT
	 */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_start ;
	smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
	for (i = 0; i < MAX_MBUF; i++) {
		mb->sm_use_count = 1 ;
		smt_free_mbuf(smc,mb) ;
		mb++ ;
	}
#else
	mb = mb_start ;
	if (!mb_init) {
		mb_free = 0 ;
		for (i = 0; i < MAX_MBUF; i++) {
			mb->sm_use_count = 1 ;
			smt_free_mbuf(smc,mb) ;
			mb++ ;
		}
		mb_init = TRUE ;
	}
#endif

	/*
	 * initialize the other variables
	 */
	smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
	smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
	smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
	smc->os.hwm.pass_llc_promisc = TRUE ;
	smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
	smc->os.hwm.detec_count = 0 ;
	smc->os.hwm.rx_break = 0 ;
	smc->os.hwm.rx_len_error = 0 ;
	smc->os.hwm.isr_flag = FALSE ;

	/*
	 * make sure that the start pointer is 16 byte aligned
	 */
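	/*
	 * Worked example (illustrative numbers): if descr_p ends in the
	 * nibble 0x4, then (descr_p & 0xf) == 4 and i == 12, so the
	 * pointer is advanced by 12 bytes up to the next 16-byte boundary.
	 * If descr_p is already aligned, i == 16 and nothing is changed.
	 * The extra (+1) descriptor allocated in mac_drv_init() provides
	 * the room for this shift.
	 */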
	i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
	if (i != 16) {
		DB_GEN(3, "i = %d", i);
		smc->os.hwm.descr_p = (union s_fp_descr volatile *)
			((char *)smc->os.hwm.descr_p+i) ;
	}
	DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);

	init_txd_ring(smc) ;
	init_rxd_ring(smc) ;
	mac_drv_fill_rxd(smc) ;

	init_plc(smc) ;
}


SMbuf *smt_get_mbuf(struct s_smc *smc)
{
	register SMbuf	*mb ;

#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
	mb = mb_free ;
#endif
	if (mb) {
#ifndef	COMMON_MB_POOL
		smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
		mb_free = mb->sm_next ;
#endif
		mb->sm_off = 8 ;
		mb->sm_use_count = 1 ;
	}
	DB_GEN(3, "get SMbuf: mb = %p", mb);
	return mb;	/* May be NULL */
}

void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{

	if (mb) {
		mb->sm_use_count-- ;
		DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
		/*
		 * If the use_count is != zero the MBuf is queued
		 * more than once and must not be queued into the
		 * free MBuf queue
		 */
		if (!mb->sm_use_count) {
			DB_GEN(3, "free SMbuf: mb = %p", mb);
#ifndef	COMMON_MB_POOL
			mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
			smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
			mb->sm_next = mb_free ;
			mb_free = mb ;
#endif
		}
	}
	else
		SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
}
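
/*
 * Informal pairing rule (derived from the two functions above):
 * smt_get_mbuf() hands out a buffer with sm_use_count == 1; each
 * additional queueing of the same MBuf bumps sm_use_count, and every
 * smt_free_mbuf() drops it by one, so the buffer goes back on the
 * free list only when the last user releases it:
 *
 *	mb = smt_get_mbuf(smc) ;	use_count == 1
 *	mb->sm_use_count++ ;		queued a second time
 *	smt_free_mbuf(smc,mb) ;		use_count == 1, still in use
 *	smt_free_mbuf(smc,mb) ;		use_count == 0, back on free list
 */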


/*
 * BEGIN_MANUAL_ENTRY(mac_drv_repair_descr)
 *	void mac_drv_repair_descr(smc)
 *
 * function	called from SMT	(HWM / hwmtm.c)
 *		The BMU is idle when this function is called.
 *		Mac_drv_repair_descr sets up the physical address
 *		for all receive and transmit queues where the BMU
 *		should continue.
 *		It may be that the BMU was reset during a fragmented
 *		transfer. In this case there are some fragments which will
 *		never be completed by the BMU. The OWN bit of these
 *		fragments must be switched to be owned by the host.
 *
 *		Give a start command to the receive BMU.
 *		Start the transmit BMUs if transmit frames are pending.
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_repair_descr(struct s_smc *smc)
{
	u_long	phys ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
		return ;
	}

	/*
	 * repair tx queues: don't start
	 */
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
	outpd(ADDR(B5_XA_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
		outpd(ADDR(B0_XA_CSR),CSR_START) ;
	}
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
	outpd(ADDR(B5_XS_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
		outpd(ADDR(B0_XS_CSR),CSR_START) ;
	}

	/*
	 * repair rx queues
	 */
	phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
	outpd(ADDR(B4_R1_DA),phys) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
}

static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
	int i ;
	int tx_used ;
	u_long phys ;
	u_long tbctrl ;
	struct s_smt_fp_txd volatile *t ;

	SK_UNUSED(smc) ;

	t = queue->tx_curr_get ;
	tx_used = queue->tx_used ;
	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
		t = t->txd_next ;
	}
	phys = le32_to_cpu(t->txd_ntdadr) ;

	t = queue->tx_curr_get ;
	while (tx_used) {
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
		tbctrl = le32_to_cpu(t->txd_tbctrl) ;

		if (tbctrl & BMU_OWN) {
			if (tbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(t->txd_ntdadr) ;
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		t = t->txd_next ;
		tx_used-- ;
	}
	return phys;
}

/*
 * Repairs the receive descriptor ring and returns the physical address
 * where the BMU should continue working.
 *
 *	o The physical address where the BMU was stopped has to be
 *	  determined. This is the next RxD after rx_curr_get with an OWN
 *	  bit set.
 *	o The BMU should start working at the beginning of the next frame.
 *	  RxDs with the OWN bit set but the STF bit cleared should be
 *	  skipped and given back to the driver (OWN = 0).
 */
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
	int i ;
	int rx_used ;
	u_long phys ;
	u_long rbctrl ;
	struct s_smt_fp_rxd volatile *r ;

	SK_UNUSED(smc) ;

	r = queue->rx_curr_get ;
	rx_used = queue->rx_used ;
	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
		r = r->rxd_next ;
	}
	phys = le32_to_cpu(r->rxd_nrdadr) ;

	r = queue->rx_curr_get ;
	while (rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;

		if (rbctrl & BMU_OWN) {
			if (rbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(r->rxd_nrdadr) ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		rx_used-- ;
	}
	return phys;
}
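
/*
 * Decision table for both repair loops above (informal summary):
 *
 *	OWN  STF  action
 *	 0    x   descriptor already completed, walk past it
 *	 1    0   mid-frame fragment the BMU will never finish:
 *		  clear OWN, walk past it
 *	 1    1   start of the next frame: stop here, the returned
 *		  physical address tells the BMU where to resume
 */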


/*
	-------------------------------------------------------------
	INTERRUPT SERVICE ROUTINE:
	-------------------------------------------------------------
*/

/*
 * BEGIN_MANUAL_ENTRY(fddi_isr)
 *	void fddi_isr(smc)
 *
 * function	DOWNCALL	(drvsr.c)
 *		interrupt service routine, handles the interrupt requests
 *		generated by the FDDI adapter.
 *
 * NOTE:	The operating system dependent module must guarantee that the
 *		interrupts of the adapter are disabled when it calls fddi_isr.
 *
 *	About the USE_BREAK_ISR mechanism:
 *
 *	The main requirement of this mechanism is to force a timer IRQ when
 *	leaving process_receive() with leave_isr set. process_receive() may
 *	be called at any time from anywhere!
 *	To be sure we don't miss such an event we set 'force_irq' by default.
 *	We have to force a timer IRQ if 'smc->os.hwm.leave_isr' AND
 *	'force_irq' are set. 'force_irq' may be reset if a receive complete
 *	IRQ is pending.
 *
 * END_MANUAL_ENTRY
 */
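
/*
 * The USE_BREAK_ISR rules above boil down to this decision on exit
 * (informal summary):
 *
 *	leave_isr  force_irq  action
 *	  FALSE       x       normal exit, nothing to force
 *	  TRUE      TRUE      force a timer IRQ so the ISR is re-entered
 *	  TRUE      FALSE     a receive complete IRQ is already pending,
 *			      no extra timer IRQ is needed
 */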
void fddi_isr(struct s_smc *smc)
{
	u_long		is ;		/* ISR source */
	u_short		stu, stl ;
	SMbuf		*mb ;

#ifdef	USE_BREAK_ISR
	int	force_irq ;
#endif

#ifdef	ODI2
	if (smc->os.hwm.rx_break) {
		mac_drv_fill_rxd(smc) ;
		if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
			smc->os.hwm.rx_break = 0 ;
			process_receive(smc) ;
		}
		else {
			smc->os.hwm.detec_count = 0 ;
			smt_force_irq(smc) ;
		}
	}
#endif
	smc->os.hwm.isr_flag = TRUE ;

#ifdef	USE_BREAK_ISR
	force_irq = TRUE ;
	if (smc->os.hwm.leave_isr) {
		smc->os.hwm.leave_isr = FALSE ;
		process_receive(smc) ;
	}
#endif

	while ((is = GET_ISR() & ISR_MASK)) {
		NDD_TRACE("CH0B",is,0,0) ;
		DB_GEN(7, "ISA = 0x%lx", is);

		if (is & IMASK_SLOW) {
			NDD_TRACE("CH1b",is,0,0) ;
			if (is & IS_PLINT1) {	/* PLC1 */
				plc1_irq(smc) ;
			}
			if (is & IS_PLINT2) {	/* PLC2 */
				plc2_irq(smc) ;
			}
			if (is & IS_MINTR1) {	/* FORMAC+ STU1(U/L) */
				stu = inpw(FM_A(FM_ST1U)) ;
				stl = inpw(FM_A(FM_ST1L)) ;
				DB_GEN(6, "Slow transmit complete");
				mac1_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR2) {	/* FORMAC+ STU2(U/L) */
				stu = inpw(FM_A(FM_ST2U)) ;
				stl = inpw(FM_A(FM_ST2L)) ;
				DB_GEN(6, "Slow receive complete");
				DB_GEN(7, "stl = %x : stu = %x", stl, stu);
				mac2_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR3) {	/* FORMAC+ STU3(U/L) */
				stu = inpw(FM_A(FM_ST3U)) ;
				stl = inpw(FM_A(FM_ST3L)) ;
				DB_GEN(6, "FORMAC Mode Register 3");
				mac3_irq(smc,stu,stl) ;
			}
			if (is & IS_TIMINT) {	/* Timer 82C54-2 */
				timer_irq(smc) ;
#ifdef	NDIS_OS2
				force_irq_pending = 0 ;
#endif
				/*
				 * out of RxD detection
				 */
				if (++smc->os.hwm.detec_count > 4) {
					/*
					 * check out of RxD condition
					 */
					process_receive(smc) ;
				}
			}
			if (is & IS_TOKEN) {	/* Restricted Token Monitor */
				rtm_irq(smc) ;
			}
			if (is & IS_R1_P) {	/* Parity error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
				SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
			}
			if (is & IS_R1_C) {	/* Encoding error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
			}
			if (is & IS_XA_C) {	/* Encoding error async tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
			}
			if (is & IS_XS_C) {	/* Encoding error sync tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
			}
		}

		/*
		 * Fast Tx complete Async/Sync Queue (BMU service)
		 */
		if (is & (IS_XS_F|IS_XA_F)) {
			DB_GEN(6, "Fast tx complete queue");
			/*
			 * clear IRQ, Note: no IRQ is lost, because
			 * we always service both queues
			 */
			outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
			outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
			mac_drv_clear_txd(smc) ;
			llc_restart_tx(smc) ;
		}

		/*
		 * Fast Rx Complete (BMU service)
		 */
		if (is & IS_R1_F) {
			DB_GEN(6, "Fast receive complete");
			/* clear IRQ */
#ifndef	USE_BREAK_ISR
			outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
			process_receive(smc) ;
#else
			process_receive(smc) ;
			if (smc->os.hwm.leave_isr) {
				force_irq = FALSE ;
			} else {
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
				process_receive(smc) ;
			}
#endif
		}

#ifndef	NDIS_OS2
		while ((mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}
#else
		if (offDepth)
			post_proc() ;

		while (!offDepth && (mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}

		if (!offDepth && smc->os.hwm.rx_break) {
			process_receive(smc) ;
		}
#endif
		if (smc->q.ev_get != smc->q.ev_put) {
			NDD_TRACE("CH2a",0,0,0) ;
			ev_dispatcher(smc) ;
		}
#ifdef	NDIS_OS2
		post_proc() ;
		if (offDepth) {		/* leave fddi_isr because */
			break ;		/* indications not allowed */
		}
#endif
#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			break ;		/* leave fddi_isr */
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* NOTE: when the isr is left, no rx is pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) } /* end of interrupt source polling loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) #ifdef USE_BREAK_ISR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (smc->os.hwm.leave_isr && force_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) smt_force_irq(smc) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) smc->os.hwm.isr_flag = FALSE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) NDD_TRACE("CH0E",0,0,0) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) -------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) RECEIVE FUNCTIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) -------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) #ifndef NDIS_OS2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * BEGIN_MANUAL_ENTRY(mac_drv_rx_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * void mac_drv_rx_mode(smc,mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * function DOWNCALL (fplus.c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * Corresponding to the parameter mode, the operating system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * dependent module can activate several receive modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * para mode = 1: RX_ENABLE_ALLMULTI enable all multicasts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * = 2: RX_DISABLE_ALLMULTI disable "enable all multicasts"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * = 3: RX_ENABLE_PROMISC enable promiscuous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * = 4: RX_DISABLE_PROMISC disable promiscuous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * = 5: RX_ENABLE_NSA enable rec. of all NSA frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * (disabled after 'driver reset' & 'set station address')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * = 6: RX_DISABLE_NSA disable rec. of all NSA frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * = 21: RX_ENABLE_PASS_SMT ( see description )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * = 22: RX_DISABLE_PASS_SMT ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * = 23: RX_ENABLE_PASS_NSA ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * = 24: RX_DISABLE_PASS_NSA ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * = 25: RX_ENABLE_PASS_DB ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * = 26: RX_DISABLE_PASS_DB ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * = 27: RX_DISABLE_PASS_ALL ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * = 28: RX_DISABLE_LLC_PROMISC ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * = 29: RX_ENABLE_LLC_PROMISC ( " " )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * If the operating system dependent module activates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * mode RX_ENABLE_PASS_SMT, the hardware module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * duplicates all SMT frames with the frame control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * FC_SMT_INFO and passes them to the LLC receive channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * by calling mac_drv_rx_init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * The SMT Frames which are sent by the local SMT and the NSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * frames whose A- and C-Indicator is not set are also duplicated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * and passed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * The receive mode RX_DISABLE_PASS_SMT disables the passing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * of SMT frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * If the operating system dependent module activates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * mode RX_ENABLE_PASS_NSA, the hardware module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * duplicates all NSA frames with frame control FC_SMT_NSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * and a set A-Indicator and passed them to the LLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * receive channel by calling mac_drv_rx_init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * All NSA Frames which are sent by the local SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * are also duplicated and passed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * The receive mode RX_DISABLE_PASS_NSA disables the passing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * of NSA frames with the A- or C-Indicator set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * NOTE: For fear that the hardware module receives NSA frames with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * a reset A-Indicator, the operating system dependent module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * before activate the RX_ENABLE_PASS_NSA mode and after every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * 'driver reset' and 'set station address'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * If the operating system dependent module activates the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * mode RX_ENABLE_PASS_DB, direct BEACON frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * (FC_BEACON frame control) are passed to the LLC receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * channel by mac_drv_rx_init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * The receive mode RX_DISABLE_PASS_DB disables the passing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * of direct BEACON frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * RX_DISABLE_PASS_ALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * Disables all special receives modes. It is equal to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * call mac_drv_set_rx_mode successively with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * RX_ENABLE_LLC_PROMISC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * (default) all received LLC frames and all SMT/NSA/DBEACON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * frames depending on the attitude of the flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * LLC layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * RX_DISABLE_LLC_PROMISC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * all received SMT/NSA/DBEACON frames depending on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * attitude of the flags PASS_SMT/PASS_NSA/PASS_DBEACON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * will be delivered to the LLC layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * all received LLC frames with a directed address, Multicast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * or Broadcast address will be delivered to the LLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * layer too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * END_MANUAL_ENTRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) void mac_drv_rx_mode(struct s_smc *smc, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) switch(mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) case RX_ENABLE_PASS_SMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) smc->os.hwm.pass_SMT = TRUE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) case RX_DISABLE_PASS_SMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) smc->os.hwm.pass_SMT = FALSE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) case RX_ENABLE_PASS_NSA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) smc->os.hwm.pass_NSA = TRUE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) case RX_DISABLE_PASS_NSA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) smc->os.hwm.pass_NSA = FALSE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) case RX_ENABLE_PASS_DB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) smc->os.hwm.pass_DB = TRUE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) case RX_DISABLE_PASS_DB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) smc->os.hwm.pass_DB = FALSE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) case RX_DISABLE_PASS_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) smc->os.hwm.pass_DB = FALSE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) smc->os.hwm.pass_llc_promisc = TRUE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) case RX_DISABLE_LLC_PROMISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) smc->os.hwm.pass_llc_promisc = FALSE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) case RX_ENABLE_LLC_PROMISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) smc->os.hwm.pass_llc_promisc = TRUE ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) case RX_ENABLE_ALLMULTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case RX_DISABLE_ALLMULTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) case RX_ENABLE_PROMISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) case RX_DISABLE_PROMISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) case RX_ENABLE_NSA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) case RX_DISABLE_NSA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) mac_set_rx_mode(smc,mode) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) #endif /* ifndef NDIS_OS2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * process receive queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) void process_receive(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) int i ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int n ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int frag_count ; /* number of RxDs of the curr rx buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) int used_frags ; /* number of RxDs of the curr frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct s_smt_fp_rxd volatile *r ; /* rxd pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct s_smt_fp_rxd volatile *rxd ; /* first rxd of rx frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) u_long rbctrl ; /* receive buffer control word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) u_long rfsw ; /* receive frame status word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) u_short rx_used ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) u_char far *virt ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) char far *data ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) SMbuf *mb ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) u_char fc ; /* Frame control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int len ; /* Frame length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) smc->os.hwm.detec_count = 0 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) queue = smc->hw.fp.rx[QUEUE_R1] ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) NDD_TRACE("RHxB",0,0,0) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) for ( ; ; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) r = queue->rx_curr_get ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) rx_used = queue->rx_used ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) frag_count = 0 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #ifdef USE_BREAK_ISR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (smc->os.hwm.leave_isr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) goto rx_end ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) #ifdef NDIS_OS2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (offDepth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) smc->os.hwm.rx_break = 1 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto rx_end ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) smc->os.hwm.rx_break = 0 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) #ifdef ODI2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (smc->os.hwm.rx_break) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) goto rx_end ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) n = 0 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) DB_RX(5, "Check RxD %p for OWN and EOF", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (rbctrl & BMU_OWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) DB_RX(4, "End of RxDs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) goto rx_end ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * out of RxD detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!rx_used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) SK_BREAK() ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /* Either we don't have an RxD or all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * RxDs are filled. Therefore it's allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * for to set the STOPPED flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) smc->hw.hw_state = STOPPED ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) mac_drv_clear_rx_queue(smc) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) smc->hw.hw_state = STARTED ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) mac_drv_fill_rxd(smc) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) smc->os.hwm.detec_count = 0 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) goto rx_end ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) rfsw = le32_to_cpu(r->rxd_rfsw) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * The BMU_STF bit is deleted, 1 frame is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * placed into more than 1 rx buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * skip frame by setting the rx len to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * if fragment count == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * The missing STF bit belongs to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * current frame, search for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * EOF bit to complete the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * the fragment belongs to the next frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * exit the loop and process the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) SK_BREAK() ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) rfsw = 0 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (frag_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) n += rbctrl & 0xffff ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) r = r->rxd_next ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) frag_count++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) rx_used-- ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) } while (!(rbctrl & BMU_EOF)) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) used_frags = frag_count ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* BMU_ST_BUF will not be changed by the ASIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) DB_RX(5, "Check STF bit in %p", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) r = r->rxd_next ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) frag_count++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) rx_used-- ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) DB_RX(5, "STF bit found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * The received frame is finished for the process receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) rxd = queue->rx_curr_get ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) queue->rx_curr_get = r ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) queue->rx_free += frag_count ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) queue->rx_used = rx_used ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * ASIC Errata no. 7 (STF - Bit Bug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) DB_RX(5, "dma_complete for RxD %p", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) smc->hw.fp.err_stats.err_valid++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) smc->mib.m[MAC0].fddiMACCopied_Ct++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* the length of the data including the FC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) len = (rfsw & RD_LENGTH) - 4 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) DB_RX(4, "frame length = %d", len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * check the frame_length and all error flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (rfsw & RD_S_MSRABT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) DB_RX(2, "Frame aborted by the FORMAC");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) smc->hw.fp.err_stats.err_abort++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * check frame status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (rfsw & RD_S_SEAC2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) DB_RX(2, "E-Indicator set");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) smc->hw.fp.err_stats.err_e_indicator++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (rfsw & RD_S_SFRMERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) DB_RX(2, "CRC error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) smc->hw.fp.err_stats.err_crc++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (rfsw & RX_FS_IMPL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) DB_RX(2, "Implementer frame");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) smc->hw.fp.err_stats.err_imp_frame++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) goto abort_frame ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (len > FDDI_RAW_MTU-4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) DB_RX(2, "Frame too long error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) smc->hw.fp.err_stats.err_too_long++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) goto abort_frame ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * SUPERNET 3 Bug: FORMAC delivers status words
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * of aborted frames to the BMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (len <= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) DB_RX(2, "Frame length = 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) goto abort_frame ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (len != (n-4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) smc->os.hwm.rx_len_error++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) goto abort_frame ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * Check SA == MA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) virt = (u_char far *) rxd->rxd_virt ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) DB_RX(2, "FC = %x", *virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (virt[12] == MA[5] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) virt[11] == MA[4] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) virt[10] == MA[3] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) virt[9] == MA[2] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) virt[8] == MA[1] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) goto abort_frame ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * test if LLC frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (rfsw & RX_FS_LLC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * if pass_llc_promisc is disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * if DA != Multicast or Broadcast or DA!=MA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * abort the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!smc->os.hwm.pass_llc_promisc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if(!(virt[1] & GROUP_ADDR_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (virt[6] != MA[5] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) virt[5] != MA[4] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) virt[4] != MA[3] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) virt[3] != MA[2] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) virt[2] != MA[1] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) virt[1] != MA[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) DB_RX(2, "DA != MA and not multi- or broadcast");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) goto abort_frame ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * LLC frame received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) DB_RX(4, "LLC - receive");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) mac_drv_rx_complete(smc,rxd,frag_count,len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (!(mb = smt_get_mbuf(smc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) smc->hw.fp.err_stats.err_no_buf++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) DB_RX(4, "No SMbuf; receive terminated");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) goto abort_frame ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) data = smtod(mb,char *) - 1 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * copy the frame into a SMT_MBuf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) #ifdef USE_OS_CPY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) hwm_cpy_rxd2mb(rxd,data,len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) DB_RX(6, "cp SMT frame to mb: len = %d", n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) memcpy(data,r->rxd_virt,n) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) data += n ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) data = smtod(mb,char *) - 1 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) fc = *(char *)mb->sm_data = *data ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) mb->sm_len = len - 1 ; /* len - fc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) data++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * SMT frame received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) switch(fc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) case FC_SMT_INFO :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) smc->hw.fp.err_stats.err_smt_frame++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) DB_RX(5, "SMT frame received");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (smc->os.hwm.pass_SMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) DB_RX(5, "pass SMT frame");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mac_drv_rx_complete(smc, rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) frag_count,len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) DB_RX(5, "requeue RxD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) mac_drv_requeue_rxd(smc,rxd,frag_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) case FC_SMT_NSA :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) smc->hw.fp.err_stats.err_smt_frame++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) DB_RX(5, "SMT frame received");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /* if pass_NSA set pass the NSA frame or */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /* pass_SMT set and the A-Indicator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* is not set, pass the NSA frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (smc->os.hwm.pass_NSA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) (smc->os.hwm.pass_SMT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) !(rfsw & A_INDIC))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) DB_RX(5, "pass SMT frame");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) mac_drv_rx_complete(smc, rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) frag_count,len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) DB_RX(5, "requeue RxD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) mac_drv_requeue_rxd(smc,rxd,frag_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) case FC_BEACON :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (smc->os.hwm.pass_DB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) DB_RX(5, "pass DB frame");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) mac_drv_rx_complete(smc, rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) frag_count,len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) DB_RX(5, "requeue RxD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) mac_drv_requeue_rxd(smc,rxd,frag_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) smt_free_mbuf(smc,mb) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) default :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * unknown FC abort the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) DB_RX(2, "unknown FC error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) smt_free_mbuf(smc,mb) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) DB_RX(5, "requeue RxD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) mac_drv_requeue_rxd(smc,rxd,frag_count) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if ((fc & 0xf0) == FC_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) smc->hw.fp.err_stats.err_mac_frame++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) smc->hw.fp.err_stats.err_imp_frame++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) DB_RX(3, "next RxD is %p", queue->rx_curr_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) continue ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /*--------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) abort_frame:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) DB_RX(5, "requeue RxD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) mac_drv_requeue_rxd(smc,rxd,frag_count) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) DB_RX(3, "next RxD is %p", queue->rx_curr_get);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) rx_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) #ifdef ALL_RX_COMPLETE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) mac_drv_all_receives_complete(smc) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return ; /* lint bug: needs return detect end of function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) u_char fc ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) DB_RX(4, "send a queued frame to the llc layer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) smc->os.hwm.r.len = mb->sm_len ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) fc = *smc->os.hwm.r.mb_pos ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) (void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) smt_free_mbuf(smc,mb) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * BEGIN_MANUAL_ENTRY(hwm_rx_frag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * void hwm_rx_frag(smc,virt,phys,len,frame_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * function MACRO (hardware module, hwmtm.h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * This function calls dma_master for preparing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * system hardware for the DMA transfer and initializes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * the current RxD with the length and the physical and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * virtual address of the fragment. Furthermore, it sets the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * STF and EOF bits depending on the frame status byte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * switches the OWN flag of the RxD, so that it is owned by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * adapter and issues an rx_start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * para virt virtual pointer to the fragment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * len the length of the fragment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * frame_status status of the frame, see design description
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * NOTE: It is possible to call this function with a fragment length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * of zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * END_MANUAL_ENTRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int frame_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) struct s_smt_fp_rxd volatile *r ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) __le32 rbctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) NDD_TRACE("RHfB",virt,len,frame_status) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) r->rxd_virt = virt ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) r->rxd_rbadr = cpu_to_le32(phys) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) rbctrl = cpu_to_le32( (((__u32)frame_status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) (FIRST_FRAG|LAST_FRAG))<<26) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) (((u_long) frame_status & FIRST_FRAG) << 21) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) r->rxd_rbctrl = rbctrl ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) outpd(ADDR(B0_R1_CSR),CSR_START) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * BEGINN_MANUAL_ENTRY(mac_drv_clear_rx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * void mac_drv_clear_rx_queue(smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * struct s_smc *smc ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * function DOWNCALL (hardware module, hwmtm.c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * mac_drv_clear_rx_queue is called by the OS-specific module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * after it has issued a card_stop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * In this case, the frames in the receive queue are obsolete and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * should be removed. For removing mac_drv_clear_rx_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * calls dma_master for each RxD and mac_drv_clear_rxd for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * receive buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * NOTE: calling sequence card_stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * CLI_FBI(), card_stop(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * NOTE: The caller is responsible that the BMUs are idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * when this function is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * END_MANUAL_ENTRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) void mac_drv_clear_rx_queue(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct s_smt_fp_rxd volatile *r ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct s_smt_fp_rxd volatile *next_rxd ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct s_smt_rx_queue *queue ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) int frag_count ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) int i ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (smc->hw.hw_state != STOPPED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) SK_BREAK() ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) queue = smc->hw.fp.rx[QUEUE_R1] ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) DB_RX(5, "clear_rx_queue");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) r = queue->rx_curr_get ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) while (queue->rx_used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) DB_RX(5, "switch OWN bit of RxD 0x%p", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) frag_count = 1 ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) r = r->rxd_next ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) while (r != queue->rx_curr_put &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) DB_RX(5, "Check STF bit in %p", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) r = r->rxd_next ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) frag_count++ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) DB_RX(5, "STF bit found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) next_rxd = r ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) DB_RX(5, "dma_complete for RxD %p", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) queue->rx_curr_get, frag_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) queue->rx_curr_get = next_rxd ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) queue->rx_used -= frag_count ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) queue->rx_free += frag_count ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) -------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) SEND FUNCTIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) -------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * BEGIN_MANUAL_ENTRY(hwm_tx_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * function DOWN_CALL (hardware module, hwmtm.c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * hwm_tx_init checks if the frame can be sent through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * corresponding send queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * para fc the frame control. To determine through which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * send queue the frame should be transmitted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * 0x50 - 0x57: asynchronous LLC frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * 0xD0 - 0xD7: synchronous LLC frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * 0x41, 0x4F: SMT frame to the network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * 0x42: SMT frame to the network and to the local SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * 0x43: SMT frame to the local SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * frag_count count of the fragments for this frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * frame_len length of the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * frame_status status of the frame, the send queue bit is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * return frame_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * END_MANUAL_ENTRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) int frame_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) NDD_TRACE("THiB",fc,frag_count,frame_len) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) smc->os.hwm.tx_len = frame_len ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) frame_status |= LAN_TX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) switch (fc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) case FC_SMT_INFO :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) case FC_SMT_NSA :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) frame_status |= LAN_TX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) case FC_SMT_LOC :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) frame_status |= LOC_TX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) case FC_SMT_LAN_LOC :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) frame_status |= LAN_TX | LOC_TX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) break ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) default :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (!smc->hw.mac_ring_is_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) frame_status &= ~LAN_TX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) frame_status |= RING_DOWN ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) DB_TX(2, "Ring is down: terminate LAN_TX");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (frag_count > smc->os.hwm.tx_p->tx_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) #ifndef NDIS_OS2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) mac_drv_clear_txd(smc) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (frag_count > smc->os.hwm.tx_p->tx_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) DB_TX(2, "Out of TxDs, terminate LAN_TX");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) frame_status &= ~LAN_TX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) frame_status |= OUT_OF_TXD ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) DB_TX(2, "Out of TxDs, terminate LAN_TX");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) frame_status &= ~LAN_TX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) frame_status |= OUT_OF_TXD ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) DB_TX(3, "frame_status = %x", frame_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return frame_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * BEGIN_MANUAL_ENTRY(hwm_tx_frag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * void hwm_tx_frag(smc,virt,phys,len,frame_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * function DOWNCALL (hardware module, hwmtm.c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * If the frame should be sent to the LAN, this function calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * dma_master, fills the current TxD with the virtual and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * physical address, sets the STF and EOF bits dependent on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * the frame status, and requests the BMU to start the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * transmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * If the frame should be sent to the local SMT, an SMT_MBuf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * is allocated if the FIRST_FRAG bit is set in the frame_status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * The fragment of the frame is copied into the SMT MBuf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * The function smt_received_pack is called if the LAST_FRAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * bit is set in the frame_status word.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * para virt virtual pointer to the fragment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * len the length of the fragment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * frame_status status of the frame, see design description
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) * return nothing returned, no parameter is modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * NOTE: It is possible to invoke this macro with a fragment length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * of zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * END_MANUAL_ENTRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) */
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	__le32	tbctrl ;

	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;
	/* Bug fix: AF / May 31 1999 (#missing)
	 * The snmpinfo problem reported by IBM is caused by an invalid
	 * t-pointer (txd) if LAN_TX is not set but LOC_TX only.
	 * Therefore set t = queue->tx_curr_put here!
	 */
	t = queue->tx_curr_put ;

	DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
	if (frame_status & LAN_TX) {
		/* 't' already points to the current TxD (see above) */
		DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
		t->txd_virt = virt ;
		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = cpu_to_le32(phys) ;
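		/*
		 * Build the buffer control word: the masked frame-status
		 * bits, shifted up by 26, set the BMU's STF/EOF/IRQ-enable
		 * control bits; BMU_OWN hands the TxD over to the adapter,
		 * and the low-order bits carry the fragment length 'len'.
		 */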
		tbctrl = cpu_to_le32((((__u32)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF)) << 26) |
			BMU_OWN | BMU_CHECK | len) ;
		t->txd_tbctrl = tbctrl ;

#ifndef	AIX
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		outpd(queue->tx_bmu_ctl,CSR_START) ;
#else	/* ifndef AIX */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		DB_TX(3, "LOC_TX:");
		if (frame_status & FIRST_FRAG) {
			if (!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX(4, "No SMbuf; transmit terminated");
			}
			else {
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef	USE_OS_CPY
#ifdef	PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX(3, "copy fragment into MBuf");
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
				/*
				 * hwm_cpy_txd2mb(txd,data,len) copies 'len'
				 * bytes from the virtual pointer in 'txd'
				 * to 'data'. The virtual pointer of the
				 * os-specific tx-buffer should be written
				 * in the LAST txd.
				 */
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif	/* nPASS_1ST_TXD_2_TX_COMP */
#endif	/* USE_OS_CPY */
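				/*
				 * The first byte of the copied frame (the
				 * frame control, FC) is moved to sm_data[0];
				 * the data pointer is advanced past it and
				 * sm_len is reduced by one before the frame
				 * is passed upward as a received packet.
				 */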
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
				*(char *)smc->os.hwm.tx_mb->sm_data =
					*smc->os.hwm.tx_data ;
				smc->os.hwm.tx_data++ ;
				smc->os.hwm.tx_mb->sm_len =
					smc->os.hwm.tx_len - 1 ;
				DB_TX(3, "pass LLC frame to SMT");
				smt_received_pack(smc,smc->os.hwm.tx_mb,
					RD_FS_LOCAL) ;
			}
		}
	}
	NDD_TRACE("THfE",t,queue->tx_free,0) ;
}


/*
 * queues an SMbuf destined for the local receive path; it is
 * delivered later from interrupt level
 */
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN(4, "queue_llc_rx: mb = %p", mb);
	smc->os.hwm.queued_rx_frames++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.llc_rx_pipe == NULL) {
		smc->os.hwm.llc_rx_pipe = mb ;
	}
	else {
		smc->os.hwm.llc_rx_tail->sm_next = mb ;
	}
	smc->os.hwm.llc_rx_tail = mb ;

	/*
	 * force a timer IRQ to receive the data
	 */
	if (!smc->os.hwm.isr_flag) {
		smt_force_irq(smc) ;
	}
}

/*
 * get an SMbuf from the llc_rx_queue
 */
static SMbuf *get_llc_rx(struct s_smc *smc)
{
	SMbuf	*mb ;

	if ((mb = smc->os.hwm.llc_rx_pipe)) {
		smc->os.hwm.queued_rx_frames-- ;
		smc->os.hwm.llc_rx_pipe = mb->sm_next ;
	}
	DB_GEN(4, "get_llc_rx: mb = 0x%p", mb);
	return mb;
}
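
/*
 * llc_rx_pipe/llc_rx_tail (and txd_tx_pipe/txd_tx_tail below) form a
 * plain singly linked FIFO, e.g. with three queued buffers:
 *
 *	llc_rx_pipe -> mb1 -> mb2 -> mb3 -> NULL
 *	                             ^-- llc_rx_tail
 *
 * queue_llc_rx() appends at the tail, get_llc_rx() removes the head.
 */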

/*
 * queues a transmit SMT MBuf for the time the frame
 * sits on the TxD ring
 */
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN(4, "queue_txd_mb: mb = %p", mb);
	smc->os.hwm.queued_txd_mb++ ;
	mb->sm_next = (SMbuf *)NULL ;
	if (smc->os.hwm.txd_tx_pipe == NULL) {
		smc->os.hwm.txd_tx_pipe = mb ;
	}
	else {
		smc->os.hwm.txd_tx_tail->sm_next = mb ;
	}
	smc->os.hwm.txd_tx_tail = mb ;
}

/*
 * get an SMbuf from the txd_tx_queue
 */
static SMbuf *get_txd_mb(struct s_smc *smc)
{
	SMbuf	*mb ;

	if ((mb = smc->os.hwm.txd_tx_pipe)) {
		smc->os.hwm.queued_txd_mb-- ;
		smc->os.hwm.txd_tx_pipe = mb->sm_next ;
	}
	DB_GEN(4, "get_txd_mb: mb = 0x%p", mb);
	return mb;
}
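
/*
 * Note: SMT frames complete on the TxD ring in the order they were
 * queued, so the head of txd_tx_pipe always belongs to the next
 * BMU_SMT_TX completion that mac_drv_clear_txd() encounters; this is
 * why a simple FIFO is sufficient here.
 */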

/*
 * SMT Send function
 */
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
	char far *data ;
	int	len ;
	int	n ;
	int	i ;
	int	frag_count ;
	int	frame_status ;
	SK_LOC_DECL(char far,*virt[3]) ;
	int	frag_len[3] ;
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t ;
	u_long	phys ;
	__le32	tbctrl;

	NDD_TRACE("THSB",mb,fc,0) ;
	DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);

	mb->sm_off-- ;	/* set to fc */
	mb->sm_len++ ;	/* + fc */
	data = smtod(mb,char *) ;
	*data = fc ;
	if (fc == FC_SMT_LOC)
		*data = FC_SMT_INFO ;

	/*
	 * determine the frag count and the virt addresses of the frags
	 */
	frag_count = 0 ;
	len = mb->sm_len ;
	while (len) {
		n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
		if (n >= len) {
			n = len ;
		}
		DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
		virt[frag_count] = data ;
		frag_len[frag_count] = n ;
		frag_count++ ;
		len -= n ;
		data += n ;
	}
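
	/*
	 * Worked example: with SMT_PAGESIZE = 4096 (illustrative value),
	 * a 100-byte frame whose data starts 40 bytes before a page
	 * boundary is split into two fragments of 40 and 60 bytes, so
	 * that no fragment crosses a page boundary.
	 */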

	/*
	 * determine the frame status
	 */
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	if (fc == FC_BEACON || fc == FC_SMT_LOC) {
		frame_status = LOC_TX ;
	}
	else {
		frame_status = LAN_TX ;
		if ((smc->os.hwm.pass_NSA && (fc == FC_SMT_NSA)) ||
		    (smc->os.hwm.pass_SMT && (fc == FC_SMT_INFO)))
			frame_status |= LOC_TX ;
	}

	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
		frame_status &= ~LAN_TX;
		if (frame_status) {
			DB_TX(2, "Ring is down: terminate LAN_TX");
		}
		else {
			DB_TX(2, "Ring is down: terminate transmission");
			smt_free_mbuf(smc,mb) ;
			return ;
		}
	}
	DB_TX(5, "frame_status = 0x%x", frame_status);
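
	/*
	 * If the frame goes to both the LAN and the local SMT, give the
	 * MBuf a use count of 2: smt_free_mbuf() decrements it, so the
	 * buffer is only released after both the transmit completion
	 * path and the local receive path have freed it.
	 */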
	if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
		mb->sm_use_count = 2 ;
	}

	if (frame_status & LAN_TX) {
		t = queue->tx_curr_put ;
		frame_status |= FIRST_FRAG ;
		for (i = 0; i < frag_count; i++) {
			DB_TX(5, "init TxD = 0x%p", t);
			if (i == frag_count-1) {
				frame_status |= LAST_FRAG ;
				t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
					(((__u32)(mb->sm_len-1)&3) << 27)) ;
			}
			t->txd_virt = virt[i] ;
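			/*
			 * dma_master() maps the fragment for a DMA read by
			 * the adapter and returns its physical address; the
			 * SMT_BUF flag tells the os-specific layer that this
			 * fragment belongs to an SMT frame.
			 */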
			phys = dma_master(smc, (void far *)virt[i],
				frag_len[i], DMA_RD|SMT_BUF) ;
			t->txd_tbadr = cpu_to_le32(phys) ;
			tbctrl = cpu_to_le32((((__u32)frame_status &
				(FIRST_FRAG|LAST_FRAG)) << 26) |
				BMU_OWN | BMU_CHECK | BMU_SMT_TX | frag_len[i]) ;
			t->txd_tbctrl = tbctrl ;
#ifndef	AIX
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
			frame_status &= ~FIRST_FRAG ;
			queue->tx_curr_put = t = t->txd_next ;
			queue->tx_free-- ;
			queue->tx_used++ ;
		}
		smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		queue_txd_mb(smc,mb) ;
	}

	if (frame_status & LOC_TX) {
		DB_TX(5, "pass Mbuf to LLC queue");
		queue_llc_rx(smc,mb) ;
	}

	/*
	 * We need to unqueue the free SMT_MBUFs here, because the SMT
	 * may want to send more than one frame for a single down call.
	 */
	mac_drv_clear_txd(smc) ;
	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}

/*	BEGIN_MANUAL_ENTRY(mac_drv_clear_txd)
 *	void mac_drv_clear_txd(smc)
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *		mac_drv_clear_txd searches both send queues for TxDs
 *		that the adapter has finished. It calls dma_complete
 *		for each TxD. If the last fragment of an LLC frame is
 *		reached, it calls mac_drv_tx_complete to release the
 *		send buffer.
 *
 * return	nothing
 *
 * END_MANUAL_ENTRY
 */
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf	*mb ;
	u_long	tbctrl ;
	int	i ;
	int	frag_count ;
	int	n ;

	NDD_TRACE("THcB",0,0,0) ;
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);

		for ( ; ; ) {
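			/*
			 * Pass 1: walk the TxDs of the next frame without
			 * releasing anything; stop at the first descriptor
			 * that is still owned by the BMU or when no used
			 * TxDs are left in this queue.
			 */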
			frag_count = 0 ;

			do {
				DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
				DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
				tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

				if (tbctrl & BMU_OWN || !queue->tx_used){
					DB_TX(4, "End of TxDs queue %d", i);
					goto free_next_queue ;	/* next queue */
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;
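
			/*
			 * Pass 2: walk the same TxDs again and call
			 * dma_complete() for each one; the BMU_SMT_TX bit,
			 * shifted down, turns into the SMT_BUF flag for
			 * SMT frames.
			 */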
			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;
				t1 = t1->txd_next ;
			}

			if (tbctrl & BMU_SMT_TX) {
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef PASS_1ST_TXD_2_TX_COMP
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
				mac_drv_tx_complete(smc,t2) ;
#else
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%p",
					queue->tx_curr_get);
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}

/*
 * BEGIN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
 *
 *	void mac_drv_clear_tx_queue(smc)
 *	struct s_smc *smc ;
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *		mac_drv_clear_tx_queue is called from the SMT when
 *		the RMT state machine has entered the ISOLATE state.
 *		This function is also called by the os-specific module
 *		after it has called the function card_stop().
 *		In this case, the frames in the send queues are obsolete and
 *		should be removed.
 *
 * note		calling sequence:
 *		CLI_FBI(), card_stop(),
 *		mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue()
 *
 * NOTE:	The caller must ensure that the BMUs are idle
 *		when this function is called.
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	int	tx_used ;
	int	i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
		return ;
	}

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);

		/*
		 * switch the OWN bit of all pending frames to the host
		 */
		t = queue->tx_curr_get ;
		tx_used = queue->tx_used ;
		while (tx_used) {
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
			DB_TX(5, "switch OWN bit of TxD 0x%p", t);
			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			t = t->txd_next ;
			tx_used-- ;
		}
	}

	/*
	 * release all TxDs of both send queues
	 */
	mac_drv_clear_txd(smc) ;

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t = queue->tx_curr_get ;

		/*
		 * write the phys pointer of the NEXT descriptor into the
		 * BMU's current address descriptor pointer and set
		 * tx_curr_get and tx_curr_put to this position
		 */
		if (i == QUEUE_S) {
			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}
		else {
			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}

		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
		queue->tx_curr_get = queue->tx_curr_put ;
	}
}


/*
	-------------------------------------------------------------
	TEST FUNCTIONS:
	-------------------------------------------------------------
*/

#ifdef	DEBUG
/*
 * BEGIN_MANUAL_ENTRY(mac_drv_debug_lev)
 *	void mac_drv_debug_lev(smc,flag,lev)
 *
 * function	DOWNCALL	(drvsr.c)
 *		To get special debug info the user can assign a debug level
 *		to any debug flag.
 *
 * para	flag	debug flag, possible values are:
 *		= 0:	reset all debug flags (the defined level is
 *			ignored)
 *		= 1:	debug.d_smtf
 *		= 2:	debug.d_smt
 *		= 3:	debug.d_ecm
 *		= 4:	debug.d_rmt
 *		= 5:	debug.d_cfm
 *		= 6:	debug.d_pcm
 *
 *		= 10:	debug.d_os.hwm_rx (hardware module receive path)
 *		= 11:	debug.d_os.hwm_tx (hardware module transmit path)
 *		= 12:	debug.d_os.hwm_gen (hardware module general flag)
 *
 *	lev	debug level
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
{
	switch (flag) {
	case (int)NULL:
		DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
		DB_P.d_cfm = 0 ;
		DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
#ifdef	SBA
		DB_P.d_sba = 0 ;
#endif
#ifdef	ESS
		DB_P.d_ess = 0 ;
#endif
		break ;
	case DEBUG_SMTF:
		DB_P.d_smtf = lev ;
		break ;
	case DEBUG_SMT:
		DB_P.d_smt = lev ;
		break ;
	case DEBUG_ECM:
		DB_P.d_ecm = lev ;
		break ;
	case DEBUG_RMT:
		DB_P.d_rmt = lev ;
		break ;
	case DEBUG_CFM:
		DB_P.d_cfm = lev ;
		break ;
	case DEBUG_PCM:
		DB_P.d_pcm = lev ;
		break ;
	case DEBUG_SBA:
#ifdef	SBA
		DB_P.d_sba = lev ;
#endif
		break ;
	case DEBUG_ESS:
#ifdef	ESS
		DB_P.d_ess = lev ;
#endif
		break ;
	case DB_HWM_RX:
		DB_P.d_os.hwm_rx = lev ;
		break ;
	case DB_HWM_TX:
		DB_P.d_os.hwm_tx = lev ;
		break ;
	case DB_HWM_GEN:
		DB_P.d_os.hwm_gen = lev ;
		break ;
	default:
		break ;
	}
}
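
/*
 * Usage sketch (illustrative): to trace the hardware module transmit
 * path at level 3, a driver would call
 *
 *	mac_drv_debug_lev(smc,DB_HWM_TX,3) ;
 *
 * and mac_drv_debug_lev(smc,0,0) resets all debug flags again.
 */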
#endif	/* DEBUG */