Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright 2014-2016 Freescale Semiconductor Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright 2016-2019 NXP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/fsl/mc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <soc/fsl/dpaa2-io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include "dpio.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include "qbman-portal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
/*
 * struct dpaa2_io - state for one DPIO portal / service instance.
 *
 * One of these is created per DPIO object (see dpaa2_io_create()) and is
 * linked on the global dpio_list so it can be handed out by the service
 * selection helpers.
 */
struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;	/* config this object was created from */
	struct qbman_swp_desc swp_desc;	/* portal descriptor derived from dpio_desc */
	struct qbman_swp *swp;		/* QBMan software portal handle */
	struct list_head node;		/* linkage on the global dpio_list */
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;	/* registered dpaa2_io_notification_ctx entries */
	struct device *dev;		/* the underlying DPIO device */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
/*
 * struct dpaa2_io_store - DMA-able result storage for pull-dequeue commands.
 *
 * Filled in by dpaa2_io_service_pull_fq()/dpaa2_io_service_pull_channel();
 * entries are then consumed one at a time via @idx.
 */
struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;    /* unaligned value from kmalloc() */
	unsigned int idx;      /* position of the next-to-be-returned entry */
	struct qbman_swp *swp; /* portal used to issue VDQCR */
	struct device *dev;    /* device used for DMA mapping */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
/* All registered DPIO objects, used for round-robin selection; both the
 * list and dpio_by_cpu are guarded by dpio_list_lock. */
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 						     int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	if (d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 		return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	 * If cpu == -1, choose the current cpu, with no guarantees about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	 * potentially being migrated away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	if (cpu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 		cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	/* If a specific cpu was requested, pick it up immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	return dpio_by_cpu[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	if (d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	d = service_select_by_cpu(d, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	if (d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 		return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	spin_lock(&dpio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	d = list_entry(dpio_list.next, struct dpaa2_io, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	list_del(&d->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	list_add_tail(&d->node, &dpio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	spin_unlock(&dpio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87)  * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88)  * @cpu: the cpu id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90)  * Return the affine dpaa2_io service, or NULL if there is no service affined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91)  * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  * service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) struct dpaa2_io *dpaa2_io_service_select(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	if (cpu == DPAA2_IO_ANY_CPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 		return service_select(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	return service_select_by_cpu(NULL, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  * dpaa2_io_create() - create a dpaa2_io object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)  * @desc: the dpaa2_io descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)  * @dev: the actual DPIO device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  * Activates a "struct dpaa2_io" corresponding to the given config of an actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  * DPIO object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)  * Return a valid dpaa2_io object for success, or NULL for failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 				 struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	/* check if CPU is out of range (-1 means any cpu) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 		kfree(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	obj->dpio_desc = *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	obj->swp = qbman_swp_init(&obj->swp_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	if (!obj->swp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		kfree(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	INIT_LIST_HEAD(&obj->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	spin_lock_init(&obj->lock_mgmt_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	spin_lock_init(&obj->lock_notifications);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	INIT_LIST_HEAD(&obj->notifications);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	/* For now only enable DQRR interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	qbman_swp_interrupt_set_trigger(obj->swp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 					QBMAN_SWP_INTERRUPT_DQRI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	if (obj->dpio_desc.receives_notifications)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		qbman_swp_push_set(obj->swp, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	spin_lock(&dpio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	list_add_tail(&obj->node, &dpio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		dpio_by_cpu[desc->cpu] = obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	spin_unlock(&dpio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	obj->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	return obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  * dpaa2_io_down() - release the dpaa2_io object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  * @d: the dpaa2_io object to be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * The "struct dpaa2_io" type can represent an individual DPIO object (as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  * which can be used to group/encapsulate multiple DPIO objects. In all cases,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  * each handle obtained should be released using this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) void dpaa2_io_down(struct dpaa2_io *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	spin_lock(&dpio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	list_del(&d->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	spin_unlock(&dpio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
/* Upper bound on DQRR entries handled in a single ISR invocation. */
#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	/* No status bits set: the interrupt was not raised by this portal. */
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	/*
	 * Drain the DQRR. Entries seen here are expected to be state-change
	 * notifications (SCN) whose 64-bit context field holds the
	 * dpaa2_io_notification_ctx pointer stashed at registration time;
	 * anything else is logged and dropped.
	 */
	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		/* Release the DQRR slot back to hardware before moving on. */
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		/* Budget exhausted: leave remaining entries for the next IRQ. */
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	/* Ack what we saw and re-enable interrupt delivery. */
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object; this is the cpu recorded
 * in the descriptor at creation time and may be DPAA2_IO_ANY_CPU (-1) if the
 * object was not affined to a particular cpu.
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d:   the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully.  In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller)
 *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, or -ENODEV for failure.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	/* Honour the requested cpu affinity; fail if no service exists. */
	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	/* Tie the consumer device's lifetime to the DPIO device. */
	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	/*
	 * qman64 is the 64-bit context the hardware will echo back in the
	 * notification; we store the ctx pointer itself there so the ISR
	 * can recover it directly (see dpaa2_io_irq()).
	 */
	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * to detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	/* Use the DPIO actually recorded at registration time, not @service. */
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	/* Stop further CDAN generation before unhooking the context. */
	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)  * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)  * @d: the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)  * @ctx: the notification context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)  * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)  * considered "disarmed". Ie. the user can issue pull dequeue operations on that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)  * traffic source for as long as it likes. Eventually it may wish to "rearm"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * that source to allow it to produce another FQDAN/CDAN, that's what this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  * function achieves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)  * Return 0 for success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) int dpaa2_io_service_rearm(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 			   struct dpaa2_io_notification_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	d = service_select_by_cpu(d, ctx->desired_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 	if (!unlikely(d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	if (ctx->is_cdan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 		err = qbman_swp_fq_schedule(d->swp, ctx->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)  * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)  * @d: the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)  * @fqid: the given frame queue id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)  * @s: the dpaa2_io_store object for the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)  * Return 0 for success, or error code for failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 			     struct dpaa2_io_store *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 	struct qbman_pull_desc pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	qbman_pull_desc_clear(&pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	qbman_pull_desc_set_fq(&pd, fqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	s->swp = d->swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	err = qbman_swp_pull(d->swp, &pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 		s->swp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)  * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)  * @d: the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)  * @channelid: the given channel id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)  * @s: the dpaa2_io_store object for the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)  * Return 0 for success, or error code for failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 				  struct dpaa2_io_store *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	struct qbman_pull_desc pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	qbman_pull_desc_clear(&pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	s->swp = d->swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	err = qbman_swp_pull(d->swp, &pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 		s->swp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)  * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)  * @d: the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)  * @fqid: the given frame queue id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)  * @fd: the frame descriptor which is enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)  * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)  * or -ENODEV if there is no dpio service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 				u32 fqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 				const struct dpaa2_fd *fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	struct qbman_eq_desc ed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 	qbman_eq_desc_clear(&ed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	qbman_eq_desc_set_no_orp(&ed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	qbman_eq_desc_set_fq(&ed, fqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	return qbman_swp_enqueue(d->swp, &ed, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)  * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)  * to a frame queue using one fqid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)  * @d: the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)  * @fqid: the given frame queue id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)  * @fd: the frame descriptor which is enqueued.
 * @nb: number of frames to be enqueued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)  * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)  * or -ENODEV if there is no dpio service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 				u32 fqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 				const struct dpaa2_fd *fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 				int nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 	struct qbman_eq_desc ed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	qbman_eq_desc_clear(&ed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	qbman_eq_desc_set_no_orp(&ed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	qbman_eq_desc_set_fq(&ed, fqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)  * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)  * @d: the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)  * @fqid: the given list of frame queue ids.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)  * @fd: the frame descriptor which is enqueued.
 * @nb: number of frames to be enqueued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)  * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)  * or -ENODEV if there is no dpio service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 				u32 *fqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 				const struct dpaa2_fd *fd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 				int nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	struct qbman_eq_desc *ed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	if (!ed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	if (!d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	for (i = 0; i < nb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		qbman_eq_desc_clear(&ed[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		qbman_eq_desc_set_no_orp(&ed[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		qbman_eq_desc_set_fq(&ed[i], fqid[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	kfree(ed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)  * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)  * @d: the given DPIO service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)  * @qdid: the given queuing destination id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)  * @prio: the given queuing priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)  * @qdbin: the given queuing destination bin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)  * @fd: the frame descriptor which is enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)  * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)  * or -ENODEV if there is no dpio service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 				u32 qdid, u8 prio, u16 qdbin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 				const struct dpaa2_fd *fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	struct qbman_eq_desc ed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	qbman_eq_desc_clear(&ed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	qbman_eq_desc_set_no_orp(&ed, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	return qbman_swp_enqueue(d->swp, &ed, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)  * dpaa2_io_service_release() - Release buffers to a buffer pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)  * @d: the given DPIO object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)  * @bpid: the buffer pool id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)  * @buffers: the buffers to be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)  * @num_buffers: the number of the buffers to be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)  * Return 0 for success, and negative error code for failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) int dpaa2_io_service_release(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 			     u16 bpid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 			     const u64 *buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 			     unsigned int num_buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	struct qbman_release_desc rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	qbman_release_desc_clear(&rd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	qbman_release_desc_set_bpid(&rd, bpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)  * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)  * @d: the given DPIO object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)  * @bpid: the buffer pool id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)  * @buffers: the buffer addresses for acquired buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)  * @num_buffers: the expected number of the buffers to acquire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)  * Return a negative error code if the command failed, otherwise it returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)  * the number of buffers acquired, which may be less than the number requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)  * Eg. if the buffer pool is empty, this will return zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) int dpaa2_io_service_acquire(struct dpaa2_io *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 			     u16 bpid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 			     u64 *buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 			     unsigned int num_buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)  * 'Stores' are reusable memory blocks for holding dequeue results, and to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)  * assist with parsing those results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)  * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)  * @max_frames: the maximum number of dequeued result for frames, must be <= 32.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)  * @dev:        the device to allow mapping/unmapping the DMAable region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)  * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)  * The 'dpaa2_io_store' returned is a DPIO service managed object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)  * Return pointer to dpaa2_io_store struct for successfully created storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)  * memory, or NULL on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 					     struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	struct dpaa2_io_store *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	if (!max_frames || (max_frames > 32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	ret->max = max_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	size = max_frames * sizeof(struct dpaa2_dq) + 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	if (!ret->alloced_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		kfree(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	ret->paddr = dma_map_single(dev, ret->vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 				    sizeof(struct dpaa2_dq) * max_frames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 				    DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	if (dma_mapping_error(dev, ret->paddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 		kfree(ret->alloced_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		kfree(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	ret->idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	ret->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)  * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)  *                            result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)  * @s: the storage memory to be destroyed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 			 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	kfree(s->alloced_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)  * dpaa2_io_store_next() - Determine when the next dequeue result is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)  * @s: the dpaa2_io_store object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)  * @is_last: indicate whether this is the last frame in the pull command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)  * When an object driver performs dequeues to a dpaa2_io_store, this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)  * can be used to determine when the next frame result is available. Once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)  * this function returns non-NULL, a subsequent call to it will try to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)  * the next dequeue result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)  * Note that if a pull-dequeue has a NULL result because the target FQ/channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)  * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)  * differentiate between "end-of-empty-dequeue" and "still-waiting".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)  * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	int match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	struct dpaa2_dq *ret = &s->vaddr[s->idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	match = qbman_result_has_new_result(s->swp, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	if (!match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 		*is_last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	s->idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	if (dpaa2_dq_is_pull_complete(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 		*is_last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 		s->idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		 * If we get an empty dequeue result to terminate a zero-results
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 		 * vdqcr, return NULL to the caller rather than expecting him to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		 * check non-NULL results every time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 			ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 		prefetch(&s->vaddr[s->idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 		*is_last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)  * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)  * @d: the given DPIO object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)  * @fqid: the id of frame queue to be queried.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)  * @fcnt: the queried frame count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)  * @bcnt: the queried byte count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)  * Knowing the FQ count at run-time can be useful in debugging situations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)  * The instantaneous frame- and byte-count are hereby returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)  * Return 0 for a successful query, and negative error code if query fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 			    u32 *fcnt, u32 *bcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	struct qbman_fq_query_np_rslt state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	struct qbman_swp *swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	swp = d->swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	ret = qbman_fq_query_state(swp, fqid, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	*fcnt = qbman_fq_state_frame_count(&state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	*bcnt = qbman_fq_state_byte_count(&state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)  * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)  * buffer pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)  * @d: the given DPIO object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)  * @bpid: the index of buffer pool to be queried.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)  * @num: the queried number of buffers in the buffer pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)  * Return 0 for a successful query, and negative error code if query fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	struct qbman_bp_query_rslt state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	struct qbman_swp *swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	unsigned long irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	d = service_select(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	swp = d->swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	ret = qbman_bp_query(swp, bpid, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	*num = qbman_bp_info_num_free_bufs(&state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);