Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Linux for s390 qdio support, buffer handling, qdio API and module support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright IBM Corp. 2000, 2008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *	      Jan Glauber <jang@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <asm/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <asm/qdio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <asm/ipl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include "cio.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "css.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "device.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "qdio.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include "qdio_debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 	"Jan Glauber <jang@linux.vnet.ibm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) MODULE_DESCRIPTION("QDIO base support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
/*
 * do_siga_sync - issue the SIGA-s (synchronize) instruction
 * @schid: subchannel id, or the subchannel token when QEBSM is in use
 * @out_mask: bitmask of output queues to synchronize
 * @in_mask: bitmask of input queues to synchronize
 * @fc: SIGA function code
 *
 * Returns the instruction's condition code (0 on success).
 */
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	/* SIGA takes its operands in fixed general registers 0-3 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"	/* insert program mask (contains cc) */
		"	srl	%0,28\n"	/* shift cc down to bits 0-1 */
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
/*
 * do_siga_input - issue the SIGA-r (read) instruction
 * @schid: subchannel id, or the subchannel token when QEBSM is in use
 * @mask: bitmask of input queues to process
 * @fc: SIGA function code
 *
 * Returns the instruction's condition code (0 on success).
 */
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	/* SIGA takes its operands in fixed general registers 0-2 */
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"	/* insert program mask (contains cc) */
		"	srl	%0,28\n"	/* shift cc down to bits 0-1 */
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	/* SIGA takes its operands in fixed general registers 0-3 */
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	/*
	 * __fc and __aob are read/write operands ("+d"): the instruction may
	 * update them; the busy indication comes back in __fc (see below).
	 */
	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"	/* insert program mask (contains cc) */
		"	srl	%0,28\n"	/* shift cc down to bits 0-1 */
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;	/* extract the busy bit for the caller */
	return cc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	/* tmp_start/tmp_count are advanced by do_eqbs() as buffers are done */
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	/* in the EQBS numbering, output queues follow the input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	/* dispatch on the condition-code qualifier returned by the HW */
	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		/* unexpected qualifier: log it and notify the upper layer */
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	/* tmp_start/tmp_count are advanced by do_sqbs() as buffers are done */
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	/* in the SQBS numbering, output queues follow the input queues */
	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	/* dispatch on the condition-code qualifier returned by the HW */
	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		/* unexpected qualifier: log it and notify the upper layer */
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 *
 * @merge_pending: treat SLSB_P_OUTPUT_PENDING buffers as part of an
 * SLSB_P_OUTPUT_EMPTY run (the merged buffers keep their own SLSB value;
 * only the reported common state is EMPTY).
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i = 1;

	/* QEBSM hardware extracts a whole run of equal states in one go: */
	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	/* a leading PENDING buffer starts an EMPTY run when merging: */
	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
		__state = SLSB_P_OUTPUT_EMPTY;

	/* walk forward (wrap-around safe) while the state stays the same: */
	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* merge PENDING into EMPTY: */
		if (merge_pending &&
		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
		    __state == SLSB_P_OUTPUT_EMPTY)
			continue;

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
/* Fetch the state of a single buffer; convenience wrapper for callers that
 * only care about one SBAL and never want PENDING/EMPTY merging. */
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	int examined = get_buf_states(q, bufnr, state, 1, auto_ack, 0);

	return examined;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) /* wrap-around safe setting of slsb states, returns number of changed buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) static inline int set_buf_states(struct qdio_q *q, int bufnr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 				 unsigned char state, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	if (is_qebsm(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 		return qdio_do_sqbs(q, state, bufnr, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	/* Ensure that all preceding changes to the SBALs are visible: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 		WRITE_ONCE(q->slsb.val[bufnr], state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		bufnr = next_buf(bufnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	/* Make our SLSB changes visible: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
/* Set the state of a single buffer; single-SBAL form of set_buf_states(). */
static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	int changed = set_buf_states(q, bufnr, state, 1);

	return changed;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) /* set slsb states to initial state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	struct qdio_q *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	for_each_input_queue(irq_ptr, q, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 			       QDIO_MAX_BUFFERS_PER_Q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	for_each_output_queue(irq_ptr, q, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 			       QDIO_MAX_BUFFERS_PER_Q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 			  unsigned int input)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	unsigned int fc = QDIO_SIGA_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	int cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	qperf_inc(q, siga_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	if (is_qebsm(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		schid = q->irq_ptr->sch_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 		fc |= QDIO_SIGA_QEBSM_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	cc = do_siga_sync(schid, output, input, fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	if (unlikely(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	return (cc) ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) static inline int qdio_siga_sync_q(struct qdio_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	if (q->is_input_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 		return qdio_siga_sync(q, 0, q->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		return qdio_siga_sync(q, q->mask, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
/*
 * qdio_siga_output - signal the adapter to process output buffers
 * @q: output queue
 * @count: number of buffers being signalled (count > 1 selects SIGA-wm)
 * @busy_bit: set if the adapter could not access a buffer (see do_siga_output)
 * @aob: asynchronous operation block (selects SIGA-wq when set)
 *
 * Retries while the adapter reports busy, for at most QDIO_BUSY_BIT_PATIENCE
 * TOD-clock ticks measured from the first busy response.
 * Returns the final condition code.
 */
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	/* multi-write / async-write only apply to IQDIO unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	/* with QEBSM the subchannel token replaces the schid */
	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		/* remember when the adapter first reported busy ... */
		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		/* ... and keep retrying until the patience window expires */
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) static inline int qdio_siga_input(struct qdio_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	unsigned int fc = QDIO_SIGA_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	int cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	qperf_inc(q, siga_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	if (is_qebsm(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 		schid = q->irq_ptr->sch_token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 		fc |= QDIO_SIGA_QEBSM_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	cc = do_siga_input(schid, q->mask, fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	if (unlikely(cc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	return (cc) ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) #define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) #define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) static inline void qdio_sync_queues(struct qdio_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	/* PCI capable outbound queues will also be scanned so sync them too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	if (pci_out_supported(q->irq_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		qdio_siga_sync_all(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		qdio_siga_sync_q(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
/* Debug helper: read one buffer state, syncing the queue first if needed. */
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	/* refresh the SLSB from the adapter when the device requires it */
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return get_buf_state(q, bufnr, state, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
/*
 * qdio_stop_polling - hand the current batch of input buffers back
 * @q: input queue
 *
 * Resets the SLSB state of all buffers in the current batch to
 * SLSB_P_INPUT_NOT_INIT and clears the batch bookkeeping. No-op when no
 * batch is outstanding.
 */
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) static inline void account_sbals(struct qdio_q *q, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	q->q_stats.nr_sbal_total += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	q->q_stats.nr_sbals[ilog2(count)]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 
/*
 * process_buffer_error - handle a run of buffers found in ERROR state
 * @q: queue containing the buffers
 * @start: first buffer of the erroneous run
 * @count: length of the run
 *
 * Records QDIO_ERROR_SLSB_STATE for the upper-layer handler. For IQDIO
 * output queues, sflags 0x10 in element 15 marks the "no target buffer
 * empty" condition, which is only counted, not logged as an error.
 */
static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	/* log the queue, position and SBAL flag bytes of the bad run */
	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 				       int count, bool auto_ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	/* ACK the newest SBAL: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	if (!auto_ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	if (!q->u.in.batch_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		q->u.in.batch_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	q->u.in.batch_count += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	unsigned char state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	q->timestamp = get_tod_clock_fast();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	count = atomic_read(&q->nr_buf_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	 * No siga sync here, as a PCI or we after a thin interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	 * already sync'ed the queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	count = get_buf_states(q, start, &state, count, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	case SLSB_P_INPUT_PRIMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 			      count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		inbound_handle_work(q, start, count, is_qebsm(q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 			qperf_inc(q, inbound_queue_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 		if (q->irq_ptr->perf_stat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 			account_sbals(q, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	case SLSB_P_INPUT_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 			      count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 		process_buffer_error(q, start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		inbound_handle_work(q, start, count, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			qperf_inc(q, inbound_queue_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		if (q->irq_ptr->perf_stat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 			account_sbals_error(q, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	case SLSB_CU_INPUT_EMPTY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		if (q->irq_ptr->perf_stat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 			q->q_stats.nr_sbal_nop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 			      q->nr, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	case SLSB_P_INPUT_NOT_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	case SLSB_P_INPUT_ACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		/* We should never see this state, throw a WARN: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			      "found state %#x at index %u on queue %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 			      state, start, q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	return get_inbound_buffer_frontier(q, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	unsigned char state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	if (!atomic_read(&q->nr_buf_used))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	if (need_siga_sync(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		qdio_siga_sync_q(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	get_buf_state(q, start, &state, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		/* more work coming */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 					int bufnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	unsigned long phys_aob = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	if (!q->aobs[bufnr]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		struct qaob *aob = qdio_allocate_aob();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		q->aobs[bufnr] = aob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	if (q->aobs[bufnr]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		phys_aob = virt_to_phys(q->aobs[bufnr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		WARN_ON_ONCE(phys_aob & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	q->sbal_state[bufnr].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	return phys_aob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 			      unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	if (q->is_input_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		qperf_inc(q, inbound_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		qperf_inc(q, outbound_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 			      start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		   q->irq_ptr->int_parm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	/* for the next time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	q->qdio_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) static inline int qdio_tasklet_schedule(struct qdio_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		tasklet_schedule(&q->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) static void __qdio_inbound_processing(struct qdio_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	unsigned int start = q->first_to_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	qperf_inc(q, tasklet_inbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	count = qdio_inbound_q_moved(q, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	qdio_kick_handler(q, start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	start = add_buf(start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	q->first_to_check = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	if (!qdio_inbound_q_done(q, start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		/* means poll time is not yet over */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		qperf_inc(q, tasklet_inbound_resched);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		if (!qdio_tasklet_schedule(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	qdio_stop_polling(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	 * We need to check again to not lose initiative after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	 * resetting the ACK state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	if (!qdio_inbound_q_done(q, start)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		qperf_inc(q, tasklet_inbound_resched2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		qdio_tasklet_schedule(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) void qdio_inbound_processing(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	struct qdio_q *q = (struct qdio_q *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	__qdio_inbound_processing(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) static void qdio_check_pending(struct qdio_q *q, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	unsigned char state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if (get_buf_state(q, index, &state, 0) > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	    state == SLSB_P_OUTPUT_PENDING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	    q->u.out.aobs[index]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		q->u.out.sbal_state[index].flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			QDIO_OUTBUF_STATE_FLAG_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		q->u.out.aobs[index] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	unsigned char state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	q->timestamp = get_tod_clock_fast();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	if (need_siga_sync(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 		    !pci_out_supported(q->irq_ptr)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		    (queue_type(q) == QDIO_IQDIO_QFMT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 		    multicast_outbound(q)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 			qdio_siga_sync_q(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	count = atomic_read(&q->nr_buf_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	case SLSB_P_OUTPUT_EMPTY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	case SLSB_P_OUTPUT_PENDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		/* the adapter got it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 			"out empty:%1d %02x", q->nr, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		atomic_sub(count, &q->nr_buf_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		if (q->irq_ptr->perf_stat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			account_sbals(q, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	case SLSB_P_OUTPUT_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		process_buffer_error(q, start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		atomic_sub(count, &q->nr_buf_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		if (q->irq_ptr->perf_stat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 			account_sbals_error(q, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	case SLSB_CU_OUTPUT_PRIMED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		/* the adapter has not fetched the output yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		if (q->irq_ptr->perf_stat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			q->q_stats.nr_sbal_nop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			      q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	case SLSB_P_OUTPUT_HALTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	case SLSB_P_OUTPUT_NOT_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		/* We should never see this state, throw a WARN: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			      "found state %#x at index %u on queue %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 			      state, start, q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) /* all buffers processed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) static inline int qdio_outbound_q_done(struct qdio_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	return atomic_read(&q->nr_buf_used) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	count = get_outbound_buffer_frontier(q, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		if (q->u.out.use_cq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 			unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			for (i = 0; i < count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 				qdio_check_pending(q, QDIO_BUFNR(start + i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 				unsigned long aob)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	int retries = 0, cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	unsigned int busy_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	if (!need_siga_out(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	qperf_inc(q, siga_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	cc = qdio_siga_output(q, count, &busy_bit, aob);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	switch (cc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		if (busy_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			while (++retries < QDIO_BUSY_BIT_RETRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 				goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			cc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 			cc = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		cc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	if (retries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		DBF_ERROR("count:%u", retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	return cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) static void __qdio_outbound_processing(struct qdio_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	unsigned int start = q->first_to_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	qperf_inc(q, tasklet_outbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	count = qdio_outbound_q_moved(q, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		q->first_to_check = add_buf(start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		qdio_kick_handler(q, start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	    !qdio_outbound_q_done(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		goto sched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (q->u.out.pci_out_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	 * Now we know that queue type is either qeth without pci enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	 * is noticed and outbound_handler is called after some time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	if (qdio_outbound_q_done(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		del_timer_sync(&q->u.out.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		if (!timer_pending(&q->u.out.timer) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) sched:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	qdio_tasklet_schedule(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) /* outbound tasklet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) void qdio_outbound_processing(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	struct qdio_q *q = (struct qdio_q *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	__qdio_outbound_processing(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) void qdio_outbound_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	struct qdio_q *q = from_timer(q, t, u.out.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	qdio_tasklet_schedule(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	struct qdio_q *out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (!pci_out_supported(irq) || !irq->scan_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	for_each_output_queue(irq, out, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		if (!qdio_outbound_q_done(out))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			qdio_tasklet_schedule(out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) void tiqdio_inbound_processing(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct qdio_q *q = (struct qdio_q *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		qdio_sync_queues(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/* The interrupt could be caused by a PCI request: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	qdio_check_outbound_pci_queues(q->irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	__qdio_inbound_processing(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) static inline void qdio_set_state(struct qdio_irq *irq_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 				  enum qdio_irq_states state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	irq_ptr->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (irb->esw.esw0.erw.cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		DBF_ERROR_HEX(irb, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		DBF_ERROR_HEX(irb->ecw, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) /* PCI interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	struct qdio_q *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (irq_ptr->irq_poll) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		for_each_input_queue(irq_ptr, q, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			tasklet_schedule(&q->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	for_each_output_queue(irq_ptr, q, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		if (qdio_outbound_q_done(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			qdio_siga_sync_q(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		qdio_tasklet_schedule(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				       unsigned long intparm, int cstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				       int dstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	struct qdio_q *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	DBF_ERROR("intp :%lx", intparm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (irq_ptr->nr_input_qs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		q = irq_ptr->input_qs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	} else if (irq_ptr->nr_output_qs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		q = irq_ptr->output_qs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		goto no_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) no_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	 * Therefore we call the LGR detection function here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	lgr_info_log();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 				      int dstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if (cstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	if (!(dstat & DEV_STAT_DEV_END))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) /* qdio interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		      struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	struct subchannel_id schid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	int cstat, dstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (!intparm || !irq_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		ccw_device_get_schid(cdev, &schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		DBF_ERROR("qint:%4x", schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (irq_ptr->perf_stat_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		irq_ptr->perf_stat.qdio_int++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (IS_ERR(irb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		wake_up(&cdev->private->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	qdio_irq_check_sense(irq_ptr, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	cstat = irb->scsw.cmd.cstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	dstat = irb->scsw.cmd.dstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	switch (irq_ptr->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	case QDIO_IRQ_STATE_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	case QDIO_IRQ_STATE_CLEANUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	case QDIO_IRQ_STATE_ESTABLISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	case QDIO_IRQ_STATE_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		if (cstat & SCHN_STAT_PCI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			qdio_int_handler_pci(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		if (cstat || dstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			qdio_handle_activate_check(irq_ptr, intparm, cstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 						   dstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	case QDIO_IRQ_STATE_STOPPED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	wake_up(&cdev->private->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * qdio_get_ssqd_desc - get qdio subchannel description
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * @cdev: ccw device to get description for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * @data: where to store the ssqd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * Returns 0 or an error code. The results of the chsc are stored in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * specified structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int qdio_get_ssqd_desc(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		       struct qdio_ssqd_desc *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct subchannel_id schid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	if (!cdev || !cdev->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	ccw_device_get_schid(cdev, &schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	DBF_EVENT("get ssqd:%4x", schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	return qdio_setup_get_ssqd(NULL, &schid, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	struct qdio_q *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	for_each_input_queue(irq_ptr, q, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		tasklet_kill(&q->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	for_each_output_queue(irq_ptr, q, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		del_timer_sync(&q->u.out.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		tasklet_kill(&q->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	struct ccw_device *cdev = irq->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	spin_lock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		/* default behaviour is halt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	spin_unlock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		DBF_ERROR("rc:%4d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	wait_event_interruptible_timeout(cdev->private->wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 					 irq->state == QDIO_IRQ_STATE_INACTIVE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 					 irq->state == QDIO_IRQ_STATE_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 					 10 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  * qdio_shutdown - shut down a qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  * @cdev: associated ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * @how: use halt or clear to shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) int qdio_shutdown(struct ccw_device *cdev, int how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	struct subchannel_id schid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	WARN_ON_ONCE(irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	ccw_device_get_schid(cdev, &schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	DBF_EVENT("qshutdown:%4x", schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	mutex_lock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	 * Subchannel was already shot down. We cannot prevent being called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	 * twice since cio may trigger a shutdown asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		mutex_unlock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	 * Indicate that the device is going down. Scheduling the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 * tasklets is forbidden from here on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	tiqdio_remove_device(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	qdio_shutdown_queues(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	qdio_shutdown_debug_entries(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	rc = qdio_cancel_ccw(irq_ptr, how);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	qdio_shutdown_thinint(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	qdio_shutdown_irq(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	mutex_unlock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) EXPORT_SYMBOL_GPL(qdio_shutdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * qdio_free - free data structures for a qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  * @cdev: associated ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) int qdio_free(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	struct subchannel_id schid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	ccw_device_get_schid(cdev, &schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	DBF_EVENT("qfree:%4x", schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	mutex_lock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	irq_ptr->debug_area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	cdev->private->qdio_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	mutex_unlock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	qdio_free_async_data(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	qdio_free_queues(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	free_page((unsigned long) irq_ptr->qdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	free_page(irq_ptr->chsc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	free_page((unsigned long) irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) EXPORT_SYMBOL_GPL(qdio_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * qdio_allocate - allocate qdio queues and associated data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  * @cdev: associated ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  * @no_input_qs: allocate this number of Input Queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  * @no_output_qs: allocate this number of Output Queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		  unsigned int no_output_qs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	struct subchannel_id schid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	struct qdio_irq *irq_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	int rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	ccw_device_get_schid(cdev, &schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	DBF_EVENT("qallocate:%4x", schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	irq_ptr->cdev = cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	mutex_init(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (qdio_allocate_dbf(irq_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		goto err_dbf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		      no_output_qs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	 * Allocate a page for the chsc calls in qdio_establish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	 * Must be pre-allocated since a zfcp recovery will call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	 * qdio_establish. In case of low memory and swap on a zfcp disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	 * we may not be able to allocate memory otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	if (!irq_ptr->chsc_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		goto err_chsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	/* qdr is used in ccw1.cda which is u32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (!irq_ptr->qdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		goto err_qdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		goto err_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	INIT_LIST_HEAD(&irq_ptr->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	cdev->private->qdio_data = irq_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) err_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	free_page((unsigned long) irq_ptr->qdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) err_qdr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	free_page(irq_ptr->chsc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) err_chsc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) err_dbf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	free_page((unsigned long) irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) EXPORT_SYMBOL_GPL(qdio_allocate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	struct qdio_q *q = irq_ptr->input_qs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	int i, use_cq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		use_cq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	for_each_output_queue(irq_ptr, q, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		if (use_cq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			if (multicast_outbound(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			if (qdio_enable_async_operation(&q->u.out) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 				use_cq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			qdio_disable_async_operation(&q->u.out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	DBF_EVENT("use_cq:%d", use_cq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void qdio_trace_init_data(struct qdio_irq *irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 				 struct qdio_initialize *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		      data->no_output_qs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		    DBF_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  * qdio_establish - establish queues on a qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * @cdev: associated ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * @init_data: initialization data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) int qdio_establish(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		   struct qdio_initialize *init_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	struct subchannel_id schid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	ccw_device_get_schid(cdev, &schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	DBF_EVENT("qestablish:%4x", schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	    init_data->no_output_qs > irq_ptr->max_output_qs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if ((init_data->no_input_qs && !init_data->input_handler) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	    (init_data->no_output_qs && !init_data->output_handler))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	if (!init_data->input_sbal_addr_array ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	    !init_data->output_sbal_addr_array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	mutex_lock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	qdio_trace_init_data(irq_ptr, init_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	qdio_setup_irq(irq_ptr, init_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	rc = qdio_establish_thinint(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		goto err_thinint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	/* establish q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	irq_ptr->ccw.flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	irq_ptr->ccw.count = irq_ptr->equeue.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	spin_lock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	ccw_device_set_options_mask(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	spin_unlock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		DBF_ERROR("rc:%4x", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		goto err_ccw_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 						   irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 						   irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (timeout <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		goto err_ccw_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		mutex_unlock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	qdio_setup_ssqd_info(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	qdio_detect_hsicq(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	/* qebsm is now setup if available, initialize buffer states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	qdio_init_buf_states(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	mutex_unlock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	qdio_print_subchannel_info(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	qdio_setup_debug_entries(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) err_ccw_timeout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) err_ccw_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	qdio_shutdown_thinint(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) err_thinint:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	qdio_shutdown_irq(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	mutex_unlock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) EXPORT_SYMBOL_GPL(qdio_establish);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * qdio_activate - activate queues on a qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * @cdev: associated cdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) int qdio_activate(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	struct subchannel_id schid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	ccw_device_get_schid(cdev, &schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	DBF_EVENT("qactivate:%4x", schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	mutex_lock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	irq_ptr->ccw.flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	irq_ptr->ccw.count = irq_ptr->aqueue.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	irq_ptr->ccw.cda = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	spin_lock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			      0, DOIO_DENY_PREFETCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	spin_unlock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		DBF_ERROR("rc:%4x", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (is_thinint_irq(irq_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		tiqdio_add_device(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	/* wait for subchannel to become active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	msleep(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	switch (irq_ptr->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	case QDIO_IRQ_STATE_STOPPED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	case QDIO_IRQ_STATE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	mutex_unlock(&irq_ptr->setup_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) EXPORT_SYMBOL_GPL(qdio_activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  * handle_inbound - reset processed input buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  * @q: queue containing the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * @callflags: flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  * @bufnr: first buffer to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * @count: how many buffers are emptied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static int handle_inbound(struct qdio_q *q, unsigned int callflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			  int bufnr, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	int overlap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	qperf_inc(q, inbound_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	/* If any processed SBALs are returned to HW, adjust our tracking: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			     q->u.in.batch_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	if (overlap > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		q->u.in.batch_count -= overlap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	atomic_add(count, &q->nr_buf_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	if (need_siga_in(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		return qdio_siga_input(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)  * handle_outbound - process filled outbound buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)  * @q: queue containing the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  * @callflags: flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  * @bufnr: first buffer to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)  * @count: how many buffers are filled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static int handle_outbound(struct qdio_q *q, unsigned int callflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			   unsigned int bufnr, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	unsigned char state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	int used, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	qperf_inc(q, outbound_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	used = atomic_add_return(count, &q->nr_buf_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	if (used == QDIO_MAX_BUFFERS_PER_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		qperf_inc(q, outbound_queue_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	if (callflags & QDIO_FLAG_PCI_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		q->u.out.pci_out_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		qperf_inc(q, pci_request_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		q->u.out.pci_out_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		unsigned long phys_aob = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		if (q->u.out.use_cq && count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		rc = qdio_kick_outbound_q(q, count, phys_aob);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	} else if (need_siga_sync(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		rc = qdio_siga_sync_q(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		   state == SLSB_CU_OUTPUT_PRIMED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		/* The previous buffer is not processed yet, tack on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		qperf_inc(q, fast_requeue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		rc = qdio_kick_outbound_q(q, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	/* Let drivers implement their own completion scanning: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (!scan_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	/* in case of SIGA errors we must process the error immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (used >= scan_threshold || rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		qdio_tasklet_schedule(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		/* free the SBALs in case of no further traffic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		if (!timer_pending(&q->u.out.timer) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			mod_timer(&q->u.out.timer, jiffies + HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)  * do_QDIO - process input or output buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)  * @cdev: associated ccw_device for the qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)  * @callflags: input or output and special flags from the program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)  * @q_nr: queue number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)  * @bufnr: buffer number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)  * @count: how many buffers to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	    int q_nr, unsigned int bufnr, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	if (callflags & QDIO_FLAG_SYNC_INPUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		return handle_inbound(irq_ptr->input_qs[q_nr],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 				      callflags, bufnr, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		return handle_outbound(irq_ptr->output_qs[q_nr],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 				       callflags, bufnr, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) EXPORT_SYMBOL_GPL(do_QDIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  * qdio_start_irq - enable interrupt processing for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  * @cdev: associated ccw_device for the qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  * Return codes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  *   0 - success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  *   1 - irqs not started since new data is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) int qdio_start_irq(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	struct qdio_q *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	for_each_input_queue(irq_ptr, q, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		qdio_stop_polling(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 * We need to check again to not lose initiative after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	 * resetting the ACK state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	if (test_nonshared_ind(irq_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		goto rescan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	for_each_input_queue(irq_ptr, q, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		if (!qdio_inbound_q_done(q, q->first_to_check))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			goto rescan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) rescan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) EXPORT_SYMBOL(qdio_start_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 				unsigned int *error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	unsigned int start = q->first_to_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 				qdio_outbound_q_moved(q, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	*bufnr = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	*error = q->qdio_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	/* for the next time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	q->first_to_check = add_buf(start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	q->qdio_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		       unsigned int *bufnr, unsigned int *error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	struct qdio_q *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (need_siga_sync(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		qdio_siga_sync_q(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	return __qdio_inspect_queue(q, bufnr, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) EXPORT_SYMBOL_GPL(qdio_inspect_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)  * qdio_get_next_buffers - process input buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  * @cdev: associated ccw_device for the qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  * @nr: input queue number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)  * @bufnr: first filled buffer number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)  * @error: buffers are in error state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)  * Return codes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)  *   < 0 - error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)  *   = 0 - no new buffers found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)  *   > 0 - number of processed buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 			  int *error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	struct qdio_q *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	q = irq_ptr->input_qs[nr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	 * Cannot rely on automatic sync after interrupt since queues may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	 * also be examined without interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	if (need_siga_sync(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		qdio_sync_queues(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	qdio_check_outbound_pci_queues(irq_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	/* Note: upper-layer MUST stop processing immediately here ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	return __qdio_inspect_queue(q, bufnr, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) EXPORT_SYMBOL(qdio_get_next_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  * qdio_stop_irq - disable interrupt processing for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)  * @cdev: associated ccw_device for the qdio subchannel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  * Return codes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)  *   0 - interrupts were already disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)  *   1 - interrupts successfully disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) int qdio_stop_irq(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	if (!irq_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) EXPORT_SYMBOL(qdio_stop_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static int __init init_QDIO(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	rc = qdio_debug_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	rc = qdio_setup_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		goto out_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	rc = qdio_thinint_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		goto out_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) out_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	qdio_setup_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) out_debug:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	qdio_debug_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static void __exit exit_QDIO(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	qdio_thinint_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	qdio_setup_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	qdio_debug_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) module_init(init_QDIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) module_exit(exit_QDIO);