Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2) /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7) #include <uapi/linux/idxd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8) #include "idxd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9) #include "registers.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) 	struct idxd_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) 	desc = wq->descs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) 	memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) 	memset(desc->completion, 0, sizeof(struct dsa_completion_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) 	desc->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) 	return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) 
/*
 * idxd_alloc_desc - allocate a free descriptor from a work queue
 * @wq:     work queue to allocate from
 * @optype: IDXD_OP_NONBLOCK to fail immediately when none is free,
 *          otherwise sleep (interruptibly) until one is released
 *
 * Return: a zeroed descriptor bound to the current CPU, or an ERR_PTR:
 * -EIO if the device is not enabled, -EAGAIN if no descriptor is free
 * (nonblocking mode, or the blocking wait was interrupted by a signal).
 */
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
	int cpu, idx;
	struct idxd_device *idxd = wq->idxd;
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	struct sbitmap_queue *sbq;

	if (idxd->state != IDXD_DEV_ENABLED)
		return ERR_PTR(-EIO);

	sbq = &wq->sbq;
	/* Fast path: try to grab a free bit (descriptor index) without sleeping. */
	idx = sbitmap_queue_get(sbq, &cpu);
	if (idx < 0) {
		if (optype == IDXD_OP_NONBLOCK)
			return ERR_PTR(-EAGAIN);
	} else {
		return __get_desc(wq, idx, cpu);
	}

	/* Slow path: wait on the sbitmap's waitqueue until a bit is cleared. */
	ws = &sbq->ws[0];
	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending_state(TASK_INTERRUPTIBLE, current))
			break;
		/*
		 * Retry the allocation only after arming the wait entry, so a
		 * concurrent free between the check and schedule() still wakes
		 * us (lost-wakeup avoidance).
		 */
		idx = sbitmap_queue_get(sbq, &cpu);
		if (idx >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	/* idx < 0 here means the wait was broken by a signal, not a free bit. */
	if (idx < 0)
		return ERR_PTR(-EAGAIN);

	return __get_desc(wq, idx, cpu);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) 	int cpu = desc->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) 	desc->cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) 	sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) 	struct idxd_device *idxd = wq->idxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) 	int vec = desc->hw->int_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) 	void __iomem *portal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) 	if (idxd->state != IDXD_DEV_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) 	portal = wq->dportal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) 	 * The wmb() flushes writes to coherent DMA data before possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) 	 * triggering a DMA read. The wmb() is necessary even on UP because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) 	 * the recipient is a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) 	iosubmit_cmds512(portal, desc->hw, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) 	 * Pending the descriptor to the lockless list for the irq_entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) 	 * that we designated the descriptor to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) 	if (desc->hw->flags & IDXD_OP_FLAG_RCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) 		llist_add(&desc->llnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) 			  &idxd->irq_entries[vec].pending_llist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }