// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co.Ltd
 * Author: Felix Zeng <felix.zeng@rock-chips.com>
 */

#include "rknpu_debugger.h"
#include "rknpu_mm.h"

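/**
 * rknpu_mm_create() - create a chunk-based memory manager
 * @mem_size: total size of the managed region, in bytes
 * @chunk_size: allocation granularity, in bytes
 * @mm: out-parameter that receives the new manager
 *
 * The region is tracked as mem_size / chunk_size chunks, one bit per
 * chunk in a bitmap.
 *
 * Return: 0 on success, -EINVAL on invalid sizes, -ENOMEM on allocation
 * failure.
 */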
int rknpu_mm_create(unsigned int mem_size, unsigned int chunk_size,
		    struct rknpu_mm **mm)
{
	unsigned int num_of_longs;
	int ret = -EINVAL;

	if (WARN_ON(mem_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(mem_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	*mm = kzalloc(sizeof(struct rknpu_mm), GFP_KERNEL);
	if (!(*mm))
		return -ENOMEM;

	(*mm)->chunk_size = chunk_size;
	(*mm)->total_chunks = mem_size / chunk_size;
	(*mm)->free_chunks = (*mm)->total_chunks;

	num_of_longs =
		((*mm)->total_chunks + BITS_PER_LONG - 1) / BITS_PER_LONG;

	(*mm)->bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
	if (!(*mm)->bitmap) {
		ret = -ENOMEM;
		goto free_mm;
	}

	mutex_init(&(*mm)->lock);

	LOG_DEBUG("total_chunks: %u, bitmap: %p\n", (*mm)->total_chunks,
		  (*mm)->bitmap);

	return 0;

free_mm:
	kfree(*mm);
	return ret;
}

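/**
 * rknpu_mm_destroy() - tear down a manager created by rknpu_mm_create()
 * @mm: the manager to destroy, may be NULL
 */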
void rknpu_mm_destroy(struct rknpu_mm *mm)
{
	if (mm != NULL) {
		mutex_destroy(&mm->lock);
		kfree(mm->bitmap);
		kfree(mm);
	}
}

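/**
 * rknpu_mm_alloc() - allocate a contiguous range of chunks
 * @mm: the manager to allocate from
 * @size: requested size in bytes, rounded up to whole chunks
 * @mm_obj: out-parameter that receives the allocated range
 *
 * Performs a first-fit scan of the bitmap for enough contiguous free
 * chunks; the resulting [range_start, range_end] range is inclusive.
 *
 * Return: 0 on success, -EINVAL for a zero size, -ENOMEM if no
 * sufficiently large contiguous free range exists.
 */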
int rknpu_mm_alloc(struct rknpu_mm *mm, unsigned int size,
		   struct rknpu_mm_obj **mm_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > mm->total_chunks * mm->chunk_size)
		return -ENOMEM;

	*mm_obj = kzalloc(sizeof(struct rknpu_mm_obj), GFP_KERNEL);
	if (!(*mm_obj))
		return -ENOMEM;

	start_search = 0;

	mutex_lock(&mm->lock);

mm_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(mm->bitmap, mm->total_chunks, start_search);

	/* If there wasn't any free chunk, bail out */
	if (found == mm->total_chunks)
		goto mm_no_free_chunk;

	/* Update fields of mm_obj */
	(*mm_obj)->range_start = found;
	(*mm_obj)->range_end = found;

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= mm->chunk_size) {
		set_bit(found, mm->bitmap);
		goto mm_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - mm->chunk_size;
	do {
		(*mm_obj)->range_end = find_next_zero_bit(
			mm->bitmap, mm->total_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous with the previous ones).
		 */
		if ((*mm_obj)->range_end != found) {
			start_search = found;
			goto mm_restart_search;
		}

		/* If we reached the end of the buffer, bail out with an error */
		if (found == mm->total_chunks)
			goto mm_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= mm->chunk_size)
			cur_size = 0;
		else
			cur_size -= mm->chunk_size;

	} while (cur_size > 0);

	/* Mark the chunks as allocated */
	for (found = (*mm_obj)->range_start; found <= (*mm_obj)->range_end;
	     found++)
		set_bit(found, mm->bitmap);

mm_out:
	mm->free_chunks -= ((*mm_obj)->range_end - (*mm_obj)->range_start + 1);
	mutex_unlock(&mm->lock);

	LOG_DEBUG("mm allocate, mm_obj: %p, range_start: %d, range_end: %d\n",
		  *mm_obj, (*mm_obj)->range_start, (*mm_obj)->range_end);

	return 0;

mm_no_free_chunk:
	mutex_unlock(&mm->lock);
	kfree(*mm_obj);

	return -ENOMEM;
}

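/**
 * rknpu_mm_free() - return a range of chunks to the manager
 * @mm: the manager the range was allocated from
 * @mm_obj: the allocation to free, may be NULL (treated like kfree(NULL))
 *
 * Return: 0 (the call cannot fail).
 */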
int rknpu_mm_free(struct rknpu_mm *mm, struct rknpu_mm_obj *mm_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mm_obj)
		return 0;

	LOG_DEBUG("mm free, mm_obj: %p, range_start: %d, range_end: %d\n",
		  mm_obj, mm_obj->range_start, mm_obj->range_end);

	mutex_lock(&mm->lock);

	/* Mark the chunks as free */
	for (bit = mm_obj->range_start; bit <= mm_obj->range_end; bit++)
		clear_bit(bit, mm->bitmap);

	mm->free_chunks += (mm_obj->range_end - mm_obj->range_start + 1);

	mutex_unlock(&mm->lock);

	kfree(mm_obj);

	return 0;
}

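/**
 * rknpu_mm_dump() - print the SRAM allocation bitmap to a seq_file
 * @m: the debugfs seq_file to print into
 * @data: unused
 *
 * Renders the bitmap in 32-chunk rows ("*" used, "." free), followed by
 * the total/used/free byte counts.
 */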
int rknpu_mm_dump(struct seq_file *m, void *data)
{
	struct rknpu_debugger_node *node = m->private;
	struct rknpu_debugger *debugger = node->debugger;
	struct rknpu_device *rknpu_dev =
		container_of(debugger, struct rknpu_device, debugger);
	struct rknpu_mm *mm = NULL;
	int cur = 0, rbot = 0, rtop = 0;
	size_t ret = 0;
	char buf[64];
	size_t size = sizeof(buf);
	int seg_chunks = 32, seg_id = 0;
	int free_size = 0;
	int i = 0;

	mm = rknpu_dev->sram_mm;
	if (mm == NULL)
		return 0;

	seq_printf(m, "SRAM bitmap: \"*\" - used, \".\" - free (1bit = %uKB)\n",
		   mm->chunk_size / 1024);

	rbot = cur = find_first_bit(mm->bitmap, mm->total_chunks);
	for (i = 0; i < cur; ++i) {
		ret += scnprintf(buf + ret, size - ret, ".");
		if (ret >= seg_chunks) {
			seq_printf(m, "[%03d] [%s]\n", seg_id++, buf);
			ret = 0;
		}
	}
	while (cur < mm->total_chunks) {
		rtop = cur;
		cur = find_next_bit(mm->bitmap, mm->total_chunks, cur + 1);
		if (cur < mm->total_chunks && cur <= rtop + 1)
			continue;

		for (i = rbot; i <= rtop; ++i) {
			ret += scnprintf(buf + ret, size - ret, "*");
			if (ret >= seg_chunks) {
				seq_printf(m, "[%03d] [%s]\n", seg_id++, buf);
				ret = 0;
			}
		}

		for (i = rtop + 1; i < cur; ++i) {
			ret += scnprintf(buf + ret, size - ret, ".");
			if (ret >= seg_chunks) {
				seq_printf(m, "[%03d] [%s]\n", seg_id++, buf);
				ret = 0;
			}
		}

		rbot = cur;
	}

	if (ret > 0)
		seq_printf(m, "[%03d] [%s]\n", seg_id++, buf);

	free_size = mm->free_chunks * mm->chunk_size;
	seq_printf(m, "SRAM total size: %d, used: %d, free: %d\n",
		   rknpu_dev->sram_size, rknpu_dev->sram_size - free_size,
		   free_size);

	return 0;
}

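/**
 * rknpu_iommu_dma_alloc_iova() - allocate an IOVA range from a domain
 * @domain: IOMMU domain whose iova_cookie backs the allocation
 * @size: allocation size in bytes
 * @dma_limit: highest usable DMA address for the device
 * @dev: device the allocation is for
 *
 * Follows the approach of the core DMA-IOMMU allocator: the length is
 * rounded up to a power of two while it fits in the IOVA range caches,
 * and the limit is clamped to the bus and aperture constraints of the
 * kernel version in use.
 *
 * Return: the allocated IOVA, or 0 on failure.
 */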
dma_addr_t rknpu_iommu_dma_alloc_iova(struct iommu_domain *domain, size_t size,
				      u64 dma_limit, struct device *dev)
{
	struct rknpu_iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;
#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
	dma_addr_t limit;
#endif

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
#else
	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;
#endif

	if (domain->geometry.force_aperture)
		dma_limit =
			min_t(u64, dma_limit, domain->geometry.aperture_end);

#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE)
	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
#else
	limit = min_t(dma_addr_t, dma_limit >> shift, iovad->end_pfn);

	iova = alloc_iova_fast(iovad, iova_len, limit, true);
#endif

	return (dma_addr_t)iova << shift;
}

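/**
 * rknpu_iommu_dma_free_iova() - release an IOVA range
 * @cookie: the iova_cookie the range was allocated from
 * @iova: base address returned by rknpu_iommu_dma_alloc_iova()
 * @size: size of the original allocation in bytes
 */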
void rknpu_iommu_dma_free_iova(struct rknpu_iommu_dma_cookie *cookie,
			       dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad));
}