^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Tegra host1x Command DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2010-2013, NVIDIA Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #ifndef __HOST1X_CDMA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #define __HOST1X_CDMA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) struct host1x_syncpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) struct host1x_userctx_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) struct host1x_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * cdma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * This is in charge of a host command DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * Sends ops to a push buffer, and takes responsibility for unpinning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * (& possibly freeing) of memory after those ops have completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * Producer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * begin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * push - send ops to the push buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * end - start command DMA and enqueue handles to be unpinned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Consumer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * update - call to update sync queue and push buffer, unpin memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
/*
 * Circular push buffer that command words are written into before the
 * hardware consumes them via DMA (see the producer flow described above).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) struct push_buffer {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) void *mapped; /* mapped pushbuffer memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) dma_addr_t dma; /* device address of pushbuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) dma_addr_t phys; /* physical address of pushbuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) u32 fence; /* index we've written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) u32 pos; /* index to write to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) u32 size; /* usable buffer size; NOTE(review): units (bytes?) not visible here — confirm in cdma.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) u32 alloc_size; /* allocated backing size; presumably >= size — confirm in cdma.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
/*
 * Per-channel submit-timeout state, driven by the delayed work item below.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) struct buffer_timeout {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) struct delayed_work wq; /* work queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) bool initialized; /* timer one-time setup flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) struct host1x_syncpt *syncpt; /* buffer completion syncpt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) u32 syncpt_val; /* syncpt value when completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) ktime_t start_ktime; /* starting time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) /* context timeout information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct host1x_client *client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/*
 * Events a caller can block on via host1x_cdma_wait_locked().
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) enum cdma_event {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) CDMA_EVENT_NONE, /* not waiting for any event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
/*
 * Per-channel command DMA state: push buffer, pending-job sync queue,
 * the event a waiter is blocked on, and timeout bookkeeping.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct host1x_cdma {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) struct mutex lock; /* controls access to shared state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) struct completion complete; /* signalled when event occurs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) enum cdma_event event; /* event that complete is waiting for */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) unsigned int slots_used; /* pb slots used in current submit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) unsigned int slots_free; /* pb slots free in current submit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) unsigned int first_get; /* DMAGET value, where submit begins */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) unsigned int last_pos; /* last value written to DMAPUT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct push_buffer push_buffer; /* channel's push buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) struct list_head sync_queue; /* job queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) struct buffer_timeout timeout; /* channel's timeout state/wq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) bool running; /* presumably true while command DMA is started — confirm in cdma.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) bool torndown; /* presumably set once the channel has been torn down — confirm in cdma.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
/*
 * Navigation helpers: a host1x_cdma is embedded in a host1x_channel, and the
 * host1x instance is the drvdata of the channel device's parent; likewise a
 * push_buffer is embedded in its host1x_cdma.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define cdma_to_host1x(cdma) dev_get_drvdata(cdma_to_channel(cdma)->dev->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define pb_to_cdma(pb) container_of(pb, struct host1x_cdma, push_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
/* Channel CDMA lifetime: set up / tear down one channel's command DMA. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) int host1x_cdma_init(struct host1x_cdma *cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) int host1x_cdma_deinit(struct host1x_cdma *cdma);
/*
 * Producer side (see flow comment at the top of this header):
 * begin a submit, push opcode pairs into the push buffer, then end the
 * submit to start command DMA and queue the job for later cleanup.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) void host1x_cdma_push_wide(struct host1x_cdma *cdma, u32 op1, u32 op2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) u32 op3, u32 op4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
/*
 * Consumer side: update the sync queue / push buffer and unpin completed
 * work; peek reads back push-buffer contents at a given DMAGET/slot.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) void host1x_cdma_update(struct host1x_cdma *cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) u32 *out);
/* Wait for a cdma_event; caller presumably holds cdma->lock ("_locked") — confirm in cdma.c. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) enum cdma_event event);
/* Timeout/recovery path: walk the sync queue after a stuck submit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) struct device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #endif