Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags   |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright (C) 2015-2016 Samsung Electronics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include "usbip_common.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include "vudc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) static inline void setup_base_pdu(struct usbip_header_basic *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 				  __u32 command, __u32 seqnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 	base->command	= command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	base->seqnum	= seqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 	base->devid	= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	base->ep	= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 	base->direction = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 				 struct v_unlink *unlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	rpdu->u.ret_unlink.status = unlink->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	struct msghdr msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	struct kvec iov[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	size_t txsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	struct usbip_header pdu_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	txsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	memset(&pdu_header, 0, sizeof(pdu_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	memset(&msg, 0, sizeof(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	memset(&iov, 0, sizeof(iov));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	/* 1. setup usbip_header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	setup_ret_unlink_pdu(&pdu_header, unlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	usbip_header_correct_endian(&pdu_header, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	iov[0].iov_base = &pdu_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	iov[0].iov_len  = sizeof(pdu_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	txsize += sizeof(pdu_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 			     1, txsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	if (ret != txsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 			return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	kfree(unlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	return txsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
/*
 * v_send_ret_submit - transmit a RET_SUBMIT reply for a completed urb
 * @udc:   device the reply belongs to
 * @urb_p: wrapped urb whose completion is reported; freed together with
 *         its urb on every exit through the "out" label
 *
 * Builds an iovec of up to 2 + number_of_packets entries (header,
 * optional IN data, optional iso descriptor array) and pushes it out
 * with a single kernel_sendmsg() call.
 *
 * Returns the total number of bytes sent, or a negative value on error.
 */
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct urb *urb = urb_p->urb;
	struct usbip_header pdu_header;
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct kvec *iov = NULL;
	int iovnum = 0;
	int ret = 0;
	size_t txsize;
	struct msghdr msg;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));

	/* reject a corrupted urb that claims data but has no buffer */
	if (urb->actual_length > 0 && !urb->transfer_buffer) {
		dev_err(&udc->gadget.dev,
			"urb: actual_length %d transfer_buffer null\n",
			urb->actual_length);
		/*
		 * NOTE(review): this early return skips the "out" cleanup,
		 * so urb_p appears to leak on this path — confirm intent.
		 */
		return -1;
	}

	/* worst-case iovec count: header + data (+ one per iso packet) */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
		iovnum = 2 + urb->number_of_packets;
	else
		iovnum = 2;

	iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		ret = -ENOMEM;
		goto out;
	}
	/* iovnum is reused from here on as the next free iovec slot */
	iovnum = 0;

	/* 1. setup usbip_header */
	setup_ret_submit_pdu(&pdu_header, urb_p);
	usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
			  pdu_header.base.seqnum);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[iovnum].iov_base = &pdu_header;
	iov[iovnum].iov_len  = sizeof(pdu_header);
	iovnum++;
	txsize += sizeof(pdu_header);

	/* 2. setup transfer buffer */
	if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
	    usb_pipein(urb->pipe) && urb->actual_length > 0) {
		/* non-iso IN transfer: ship the received bytes verbatim */
		iov[iovnum].iov_base = urb->transfer_buffer;
		iov[iovnum].iov_len  = urb->actual_length;
		iovnum++;
		txsize += urb->actual_length;
	} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
		usb_pipein(urb->pipe)) {
		/* FIXME - copypasted from stub_tx, refactor */
		int i;

		/* iso IN: one iovec per packet, at its frame offset */
		for (i = 0; i < urb->number_of_packets; i++) {
			iov[iovnum].iov_base = urb->transfer_buffer +
				urb->iso_frame_desc[i].offset;
			iov[iovnum].iov_len =
				urb->iso_frame_desc[i].actual_length;
			iovnum++;
			txsize += urb->iso_frame_desc[i].actual_length;
		}

		/* per-packet lengths must add up to urb->actual_length */
		if (txsize != sizeof(pdu_header) + urb->actual_length) {
			usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
			ret = -EPIPE;
			goto out;
		}
	}
	/* else - no buffer to send */

	/* 3. setup iso_packet_descriptor */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
		ssize_t len = 0;

		iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
		if (!iso_buffer) {
			usbip_event_add(&udc->ud,
					VUDC_EVENT_ERROR_MALLOC);
			ret = -ENOMEM;
			goto out;
		}

		iov[iovnum].iov_base = iso_buffer;
		iov[iovnum].iov_len  = len;
		txsize += len;
		iovnum++;
	}

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
						iov,  iovnum, txsize);
	if (ret != txsize) {
		/* short or failed send: raise an event to tear down */
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			ret = -EPIPE;
		goto out;
	}

out:
	kfree(iov);
	kfree(iso_buffer);
	free_urbp_and_urb(urb_p);
	if (ret < 0)
		return ret;
	return txsize;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static int v_send_ret(struct vudc *udc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	struct tx_item *txi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	size_t total_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	spin_lock_irqsave(&udc->lock_tx, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	while (!list_empty(&udc->tx_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		txi = list_first_entry(&udc->tx_queue, struct tx_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 				       tx_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 		list_del(&txi->tx_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		spin_unlock_irqrestore(&udc->lock_tx, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		switch (txi->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 		case TX_SUBMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 			ret = v_send_ret_submit(udc, txi->s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		case TX_UNLINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 			ret = v_send_ret_unlink(udc, txi->u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 		kfree(txi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 		total_size += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		spin_lock_irqsave(&udc->lock_tx, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	spin_unlock_irqrestore(&udc->lock_tx, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	return total_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) int v_tx_loop(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	struct usbip_device *ud = (struct usbip_device *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	struct vudc *udc = container_of(ud, struct vudc, ud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		if (usbip_event_happened(&udc->ud))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		ret = v_send_ret(udc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 			pr_warn("v_tx exit with error %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 		wait_event_interruptible(udc->tx_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 					 (!list_empty(&udc->tx_queue) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 					 kthread_should_stop()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) /* called with spinlocks held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	struct tx_item *txi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	struct v_unlink *unlink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	if (!txi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	if (!unlink) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		kfree(txi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	unlink->seqnum = seqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	unlink->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	txi->type = TX_UNLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	txi->u = unlink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	list_add_tail(&txi->tx_entry, &udc->tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /* called with spinlocks held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	struct tx_item *txi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	if (!txi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	txi->type = TX_SUBMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	txi->s = urb_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	list_add_tail(&txi->tx_entry, &udc->tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }