Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Tegra host1x Command DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (c) 2010-2013, NVIDIA Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/host1x.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/kfifo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <trace/events/host1x.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include "cdma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "channel.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include "dev.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include "debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include "job.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * push_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * The push buffer is a circular array of words to be fetched by command DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * Note that it works slightly differently to the sync queue; fence == pos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * means that the push buffer is full, not empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * Typically the commands written into the push buffer are a pair of words. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * use slots to represent each of these pairs and to simplify things. Note the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * strange number of slots allocated here. 512 slots will fit exactly within a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  * single memory page. We also need one additional word at the end of the push
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * buffer for the RESTART opcode that will instruct the CDMA to jump back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * the beginning of the push buffer. With 512 slots, this means that we'll use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * 2 memory pages and waste 4092 bytes of the second page that will never be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  * used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #define HOST1X_PUSHBUFFER_SLOTS	511
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  * Clean up push buffer resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) static void host1x_pushbuffer_destroy(struct push_buffer *pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	struct host1x_cdma *cdma = pb_to_cdma(pb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	struct host1x *host1x = cdma_to_host1x(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	if (!pb->mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	if (host1x->domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 		iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 		free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	pb->mapped = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	pb->phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
/*
 * Init push buffer resources
 *
 * Allocates the DMA memory for the circular push buffer and, when an
 * IOMMU domain is present, maps it read-only for the device behind a
 * freshly allocated IOVA range. Returns 0 on success or a negative
 * errno on allocation/mapping failure.
 */
static int host1x_pushbuffer_init(struct push_buffer *pb)
{
	struct host1x_cdma *cdma = pb_to_cdma(pb);
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct iova *alloc;
	u32 size;
	int err;

	pb->mapped = NULL;
	pb->phys = 0;
	/* each slot is a pair of 32-bit words, i.e. 8 bytes */
	pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;

	/* one extra word at the end for the RESTART opcode */
	size = pb->size + 4;

	/* initialize buffer pointers */
	pb->fence = pb->size - 8;
	pb->pos = 0;

	if (host1x->domain) {
		unsigned long shift;

		/* round the allocation up to the IOVA granularity */
		size = iova_align(&host1x->iova, size);

		pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
					  GFP_KERNEL);
		if (!pb->mapped)
			return -ENOMEM;

		/*
		 * Carve an IOVA range out of the host1x address space and
		 * map the buffer read-only: CDMA only ever fetches from
		 * the push buffer, it never writes to it.
		 */
		shift = iova_shift(&host1x->iova);
		alloc = alloc_iova(&host1x->iova, size >> shift,
				   host1x->iova_end >> shift, true);
		if (!alloc) {
			err = -ENOMEM;
			goto iommu_free_mem;
		}

		pb->dma = iova_dma_addr(&host1x->iova, alloc);
		err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
				IOMMU_READ);
		if (err)
			goto iommu_free_iova;
	} else {
		/* no IOMMU: the device fetches via the DMA/physical address */
		pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
					  GFP_KERNEL);
		if (!pb->mapped)
			return -ENOMEM;

		pb->dma = pb->phys;
	}

	/* remember the (possibly IOVA-aligned) size for destroy/unmap */
	pb->alloc_size = size;

	host1x_hw_pushbuffer_init(host1x, pb);

	return 0;

	/* error unwind: undo acquisitions in reverse order */
iommu_free_iova:
	__free_iova(&host1x->iova, alloc);
iommu_free_mem:
	dma_free_wc(host1x->dev, size, pb->mapped, pb->phys);

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)  * Push two words to the push buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  * Caller must ensure push buffer is not full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	WARN_ON(pb->pos == pb->fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	*(p++) = op1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	*(p++) = op2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	pb->pos += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	if (pb->pos >= pb->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		pb->pos -= pb->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)  * Pop a number of two word slots from the push buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)  * Caller must ensure push buffer is not empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	/* Advance the next write position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	pb->fence += slots * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	if (pb->fence >= pb->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 		pb->fence -= pb->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * Return the number of two word slots free in the push buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) static u32 host1x_pushbuffer_space(struct push_buffer *pb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	unsigned int fence = pb->fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	if (pb->fence < pb->pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		fence += pb->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	return (fence - pb->pos) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
/*
 * Sleep (if necessary) until the requested event happens
 *   - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
 *     - Returns 1
 *   - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
 *     - Return the amount of space (> 0)
 * Must be called with the cdma lock held.
 *
 * The lock is dropped while sleeping and re-acquired before returning;
 * only one waiter at a time may register on cdma->event, others yield
 * and retry.
 */
unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
				     enum cdma_event event)
{
	for (;;) {
		struct push_buffer *pb = &cdma->push_buffer;
		unsigned int space;

		/* evaluate the wait condition for the requested event */
		switch (event) {
		case CDMA_EVENT_SYNC_QUEUE_EMPTY:
			space = list_empty(&cdma->sync_queue) ? 1 : 0;
			break;

		case CDMA_EVENT_PUSH_BUFFER_SPACE:
			space = host1x_pushbuffer_space(pb);
			break;

		default:
			/*
			 * NOTE(review): -EINVAL funneled through an unsigned
			 * return type reaches callers as a large positive
			 * value; this path only triggers for invalid events.
			 */
			WARN_ON(1);
			return -EINVAL;
		}

		/* condition already satisfied -- no need to sleep */
		if (space)
			return space;

		trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
				       event);

		/* If somebody has managed to already start waiting, yield */
		if (cdma->event != CDMA_EVENT_NONE) {
			mutex_unlock(&cdma->lock);
			schedule();
			mutex_lock(&cdma->lock);
			continue;
		}

		/*
		 * Register as the single waiter; update_cdma_locked() clears
		 * the event and signals cdma->complete when it fires.
		 */
		cdma->event = event;

		/* drop the lock while sleeping so the queue can drain */
		mutex_unlock(&cdma->lock);
		wait_for_completion(&cdma->complete);
		mutex_lock(&cdma->lock);
	}

	/* unreachable: the loop above only exits via return */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 
/*
 * Sleep (if necessary) until the push buffer has enough free space.
 *
 * Must be called with the cdma lock held.
 *
 * Like host1x_cdma_wait_locked(), but waits for @needed slots rather
 * than any space, and flushes pending commands to the hardware before
 * each sleep so the buffer can actually drain. Always returns 0.
 */
static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
					     struct host1x_cdma *cdma,
					     unsigned int needed)
{
	while (true) {
		struct push_buffer *pb = &cdma->push_buffer;
		unsigned int space;

		/* enough room already? then we are done */
		space = host1x_pushbuffer_space(pb);
		if (space >= needed)
			break;

		trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
				       CDMA_EVENT_PUSH_BUFFER_SPACE);

		/* kick the hardware so queued work completes and frees slots */
		host1x_hw_cdma_flush(host1x, cdma);

		/* If somebody has managed to already start waiting, yield */
		if (cdma->event != CDMA_EVENT_NONE) {
			mutex_unlock(&cdma->lock);
			schedule();
			mutex_lock(&cdma->lock);
			continue;
		}

		/*
		 * Register as the single waiter; update_cdma_locked() clears
		 * the event and signals cdma->complete when slots free up.
		 */
		cdma->event = CDMA_EVENT_PUSH_BUFFER_SPACE;

		/* drop the lock while sleeping so the queue can drain */
		mutex_unlock(&cdma->lock);
		wait_for_completion(&cdma->complete);
		mutex_lock(&cdma->lock);
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)  * Start timer that tracks the time spent by the job.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)  * Must be called with the cdma lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) static void cdma_start_timer_locked(struct host1x_cdma *cdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 				    struct host1x_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	struct host1x *host = cdma_to_host1x(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	if (cdma->timeout.client) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 		/* timer already started */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	cdma->timeout.client = job->client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	cdma->timeout.syncpt_val = job->syncpt_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 	cdma->timeout.start_ktime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	schedule_delayed_work(&cdma->timeout.wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 			      msecs_to_jiffies(job->timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 
/*
 * Stop timer when a buffer submission completes.
 * Must be called with the cdma lock held.
 */
static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
{
	/* cancel the handler if it has not started running yet */
	cancel_delayed_work(&cdma->timeout.wq);
	/* NULL client marks the timer as disarmed for cdma_start_timer_locked() */
	cdma->timeout.client = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
/*
 * For all sync queue entries that have already finished according to the
 * current sync point registers:
 *  - unpin & unref their mems
 *  - pop their push buffer slots
 *  - remove them from the sync queue
 * This is normally called from the host code's worker thread, but can be
 * called manually if necessary.
 * Must be called with the cdma lock held.
 */
static void update_cdma_locked(struct host1x_cdma *cdma)
{
	bool signal = false;
	struct host1x *host1x = cdma_to_host1x(cdma);
	struct host1x_job *job, *n;

	/* If CDMA is stopped, queue is cleared and we can return */
	if (!cdma->running)
		return;

	/*
	 * Walk the sync queue, reading the sync point registers as necessary,
	 * to consume as many sync queue entries as possible without blocking
	 */
	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
		struct host1x_syncpt *sp =
			host1x_syncpt_get(host1x, job->syncpt_id);

		/* Check whether this syncpt has completed, and bail if not */
		if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
			/* Start timer on next pending syncpt */
			if (job->timeout)
				cdma_start_timer_locked(cdma, job);

			break;
		}

		/* Cancel timeout, when a buffer completes */
		if (cdma->timeout.client)
			stop_cdma_timer_locked(cdma);

		/* Unpin the memory */
		host1x_job_unpin(job);

		/* Pop push buffer slots */
		if (job->num_slots) {
			struct push_buffer *pb = &cdma->push_buffer;

			host1x_pushbuffer_pop(pb, job->num_slots);

			/* a waiter may now have enough push buffer space */
			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
				signal = true;
		}

		/* drop the queue's reference to the finished job */
		list_del(&job->list);
		host1x_job_put(job);
	}

	/* wake a waiter blocked on the queue draining completely */
	if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
	    list_empty(&cdma->sync_queue))
		signal = true;

	if (signal) {
		/* clear the event before completing so the waiter re-checks */
		cdma->event = CDMA_EVENT_NONE;
		complete(&cdma->complete);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 
/*
 * Recover the channel after a job timeout: skip entries that already
 * completed, CPU-increment the syncpts the hung job never reached, and
 * restart CDMA at the next job (or at the end of the push buffer if no
 * jobs remain). Called from the timeout/teardown path with the cdma
 * lock held (presumably -- confirm against the hardware backend caller).
 */
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
				   struct device *dev)
{
	struct host1x *host1x = cdma_to_host1x(cdma);
	u32 restart_addr, syncpt_incrs, syncpt_val;
	struct host1x_job *job, *next_job = NULL;

	/* read the current HW value of the timed-out job's syncpt */
	syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);

	dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
		__func__, syncpt_val);

	/*
	 * Move the sync_queue read pointer to the first entry that hasn't
	 * completed based on the current HW syncpt value. It's likely there
	 * won't be any (i.e. we're still at the head), but covers the case
	 * where a syncpt incr happens just prior/during the teardown.
	 */

	dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
		__func__);

	list_for_each_entry(job, &cdma->sync_queue, list) {
		/* first job whose end threshold the HW has not reached */
		if (syncpt_val < job->syncpt_end) {

			if (!list_is_last(&job->list, &cdma->sync_queue))
				next_job = list_next_entry(job, list);

			goto syncpt_incr;
		}

		/* already finished -- just log it for diagnostics */
		host1x_job_dump(dev, job);
	}

	/* all jobs have been completed */
	job = NULL;

syncpt_incr:

	/*
	 * Increment with CPU the remaining syncpts of a partially executed job.
	 *
	 * CDMA will continue execution starting with the next job or will get
	 * into idle state.
	 */
	if (next_job)
		restart_addr = next_job->first_get;
	else
		restart_addr = cdma->last_pos;

	/* do CPU increments for the remaining syncpts */
	if (job) {
		dev_dbg(dev, "%s: perform CPU incr on pending buffers\n",
			__func__);

		/* won't need a timeout when replayed */
		job->timeout = 0;

		/* how many increments the hung job still owed the syncpt */
		syncpt_incrs = job->syncpt_end - syncpt_val;
		dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);

		host1x_job_dump(dev, job);

		/* safe to use CPU to incr syncpts */
		host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
						syncpt_incrs, job->syncpt_end,
						job->num_slots);

		dev_dbg(dev, "%s: finished sync_queue modification\n",
			__func__);
	}

	/* roll back DMAGET and start up channel again */
	host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)  * Create a cdma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) int host1x_cdma_init(struct host1x_cdma *cdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 	mutex_init(&cdma->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	init_completion(&cdma->complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	INIT_LIST_HEAD(&cdma->sync_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	cdma->event = CDMA_EVENT_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	cdma->running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	cdma->torndown = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	err = host1x_pushbuffer_init(&cdma->push_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)  * Destroy a cdma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) int host1x_cdma_deinit(struct host1x_cdma *cdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	struct push_buffer *pb = &cdma->push_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	struct host1x *host1x = cdma_to_host1x(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	if (cdma->running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		pr_warn("%s: CDMA still running\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	host1x_pushbuffer_destroy(pb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	host1x_hw_cdma_timeout_destroy(host1x, cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)  * Begin a cdma submit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	struct host1x *host1x = cdma_to_host1x(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	mutex_lock(&cdma->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	if (job->timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		/* init state on first submit with timeout value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		if (!cdma->timeout.initialized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 			int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 			err = host1x_hw_cdma_timeout_init(host1x, cdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 							  job->syncpt_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 			if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 				mutex_unlock(&cdma->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	if (!cdma->running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 		host1x_hw_cdma_start(host1x, cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	cdma->slots_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	cdma->slots_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	cdma->first_get = cdma->push_buffer.pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	trace_host1x_cdma_begin(dev_name(job->channel->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)  * Push two words into a push buffer slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)  * Blocks as necessary if the push buffer is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	struct host1x *host1x = cdma_to_host1x(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	struct push_buffer *pb = &cdma->push_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	u32 slots_free = cdma->slots_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	if (host1x_debug_trace_cmdbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 				       op1, op2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	if (slots_free == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 		host1x_hw_cdma_flush(host1x, cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		slots_free = host1x_cdma_wait_locked(cdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 						CDMA_EVENT_PUSH_BUFFER_SPACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	cdma->slots_free = slots_free - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	cdma->slots_used++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	host1x_pushbuffer_push(pb, op1, op2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)  * Push four words into two consecutive push buffer slots. Note that extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)  * care needs to be taken not to split the two slots across the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)  * push buffer. Otherwise the RESTART opcode at the end of the push buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)  * that ensures processing will restart at the beginning will break up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)  * four words.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)  * Blocks as necessary if the push buffer is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 			   u32 op3, u32 op4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	struct host1x_channel *channel = cdma_to_channel(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	struct host1x *host1x = cdma_to_host1x(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	struct push_buffer *pb = &cdma->push_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	unsigned int needed = 2, extra = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	unsigned int space = cdma->slots_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	if (host1x_debug_trace_cmdbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		trace_host1x_cdma_push_wide(dev_name(channel->dev), op1, op2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 					    op3, op4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	/* compute number of extra slots needed for padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	if (pb->pos + 16 > pb->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		extra = (pb->size - pb->pos) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		needed += extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	host1x_cdma_wait_pushbuffer_space(host1x, cdma, needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	space = host1x_pushbuffer_space(pb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	cdma->slots_free = space - needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	cdma->slots_used += needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	 * Note that we rely on the fact that this is only used to submit wide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	 * gather opcodes, which consist of 3 words, and they are padded with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	 * a NOP to avoid having to deal with fractional slots (a slot always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	 * represents 2 words). The fourth opcode passed to this function will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	 * therefore always be a NOP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	 * This works around a slight ambiguity when it comes to opcodes. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	 * all current host1x incarnations the NOP opcode uses the exact same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	 * encoding (0x20000000), so we could hard-code the value here, but a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	 * new incarnation may change it and break that assumption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	for (i = 0; i < extra; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 		host1x_pushbuffer_push(pb, op4, op4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	host1x_pushbuffer_push(pb, op1, op2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	host1x_pushbuffer_push(pb, op3, op4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)  * End a cdma submit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)  * Kick off DMA, add job to the sync queue, and a number of slots to be freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)  * from the pushbuffer. The handles for a submit must all be pinned at the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)  * time, but they can be unpinned in smaller chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) void host1x_cdma_end(struct host1x_cdma *cdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 		     struct host1x_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	struct host1x *host1x = cdma_to_host1x(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	bool idle = list_empty(&cdma->sync_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	host1x_hw_cdma_flush(host1x, cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	job->first_get = cdma->first_get;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	job->num_slots = cdma->slots_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	host1x_job_get(job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	list_add_tail(&job->list, &cdma->sync_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	/* start timer on idle -> active transitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	if (job->timeout && idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		cdma_start_timer_locked(cdma, job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	trace_host1x_cdma_end(dev_name(job->channel->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	mutex_unlock(&cdma->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)  * Update cdma state according to current sync point values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) void host1x_cdma_update(struct host1x_cdma *cdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	mutex_lock(&cdma->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	update_cdma_locked(cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	mutex_unlock(&cdma->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }