Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards. The listing below is the Intel I/OAT DMA engine's sysfs support (drivers/dma/ioat/sysfs.c in the kernel tree); every line is blamed to commit 8f3ce5b39 (kx, 2023-10-28).

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
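
/*
 * Reading the "cap" attribute yields the channel's capability list, e.g.
 * "copy pq pq_val xor xor_val intr" on a channel advertising every mask
 * bit tested above, or just "copy" on a plain memcpy-only channel.
 */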

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioatdma_chan *ioat_chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&ioat_chan->dma_chan, page);
}

static ssize_t
ioat_attr_store(struct kobject *kobj, struct attribute *attr,
		const char *page, size_t count)
{
	struct ioat_sysfs_entry *entry;
	struct ioatdma_chan *ioat_chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);

	if (!entry->store)
		return -EIO;
	return entry->store(&ioat_chan->dma_chan, page, count);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
	.store	= ioat_attr_store,
};
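
/*
 * The two dispatchers above assume an entry type that pairs a
 * struct attribute with per-channel show/store callbacks. Its real
 * definition lives in the driver's dma.h (not shown here); a sketch
 * consistent with the container_of() and callback usage above would be:
 *
 *	struct ioat_sysfs_entry {
 *		struct attribute attr;
 *		ssize_t (*show)(struct dma_chan *, char *);
 *		ssize_t (*store)(struct dma_chan *, const char *, size_t);
 *	};
 */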

void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&ioat_chan->kobj, type,
					   parent, "quickdata");
		if (err) {
			dev_warn(to_dev(ioat_chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&ioat_chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
			kobject_del(&ioat_chan->kobj);
			kobject_put(&ioat_chan->kobj);
		}
	}
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t intr_coalesce_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat_chan->intr_coalesce);
}

static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page,
				   size_t count)
{
	int intr_coalesce = 0;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (sscanf(page, "%du", &intr_coalesce) != -1) {
		if ((intr_coalesce < 0) ||
		    (intr_coalesce > IOAT_INTRDELAY_MASK))
			return -EINVAL;
		ioat_chan->intr_coalesce = intr_coalesce;
	}

	return count;
}

static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RW(intr_coalesce);
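
/*
 * Note: the kernel's vsscanf() returns the number of conversions made, so
 * the "!= -1" test in intr_coalesce_store() effectively always passes and
 * malformed input is silently stored as 0. A stricter parse (an
 * illustrative sketch only, not what this driver does) could use
 * kstrtoint():
 *
 *	static ssize_t intr_coalesce_store(struct dma_chan *c,
 *					   const char *page, size_t count)
 *	{
 *		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 *		int val, err;
 *
 *		err = kstrtoint(page, 10, &val);
 *		if (err)
 *			return err;
 *		if (val < 0 || val > IOAT_INTRDELAY_MASK)
 *			return -EINVAL;
 *		ioat_chan->intr_coalesce = val;
 *		return count;
 *	}
 */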

static struct attribute *ioat_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	&intr_coalesce_attr.attr,
	NULL,
};

struct kobj_type ioat_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat_attrs,
};
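
/*
 * ioat_ktype is what the rest of the driver is expected to hand to
 * ioat_kobject_add()/ioat_kobject_del() above; in the upstream ioatdma
 * driver that wiring sits in the probe/teardown path (init.c), roughly
 * (a sketch, not part of this file):
 *
 *	ioat_kobject_add(ioat_dma, &ioat_ktype);   // after channels register
 *	...
 *	ioat_kobject_del(ioat_dma);                // before the DMA device is torn down
 *
 * Each channel then appears in sysfs as a "quickdata" directory under its
 * dma channel device, containing ring_size, ring_active, cap, version and
 * intr_coalesce.
 */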