// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define   UNIPHIER_SSCC_BST		BIT(20)	/* UCWG burst read */
#define   UNIPHIER_SSCC_ACT		BIT(19)	/* Inst-Data separate */
#define   UNIPHIER_SSCC_WTG		BIT(18)	/* WT gathering on */
#define   UNIPHIER_SSCC_PRD		BIT(17)	/* enable pre-fetch */
#define   UNIPHIER_SSCC_ON		BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define   UNIPHIER_SSCOPE_CM_INV	0x0	/* invalidate */
#define   UNIPHIER_SSCOPE_CM_CLEAN	0x1	/* clean */
#define   UNIPHIER_SSCOPE_CM_FLUSH	0x2	/* flush */
#define   UNIPHIER_SSCOPE_CM_SYNC	0x8	/* sync (drain bufs) */
#define   UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define   UNIPHIER_SSCOQM_S_MASK	(0x3 << 17)
#define   UNIPHIER_SSCOQM_S_RANGE	(0x0 << 17)
#define   UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define   UNIPHIER_SSCOQM_CE		BIT(15)	/* notify completion */
#define   UNIPHIER_SSCOQM_CM_INV	0x0	/* invalidate */
#define   UNIPHIER_SSCOQM_CM_CLEAN	0x1	/* clean */
#define   UNIPHIER_SSCOQM_CM_FLUSH	0x2	/* flush */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete */
#define   UNIPHIER_SSCOPPQSEF_FE	BIT(1)
#define   UNIPHIER_SSCOPPQSEF_OE	BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define   UNIPHIER_SSCOLPQS_EF		BIT(2)
#define   UNIPHIER_SSCOLPQS_EST		BIT(1)
#define   UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
	((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * struct uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base address of the active way control registers
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	void __iomem *way_ctrl_base;
	u32 way_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy. This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic. Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another operation is issued,
	 * the new one is stored in the queue. The controller performs one
	 * operation after another. If the queue is full, the status register,
	 * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
	 * failed. The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
	 * different instances for each CPU, i.e. each CPU can track the status
	 * of the maintenance operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
	 * SSCOQWN}, are shared between multiple CPUs, but the hardware still
	 * guarantees the registration sequence is atomic; the write accesses
	 * to them are arbitrated by the hardware. The first accessor to the
	 * register, UNIPHIER_SSCOQM, holds the access right and it is released
	 * by reading the status register, UNIPHIER_SSCOPPQSEF. While one CPU
	 * is holding the access right, other CPUs fail to register operations.
	 * One CPU should not hold the access right for a long time, so local
	 * IRQs should be disabled during the following sequence.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

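	/*
	 * Register the requested operation. If SSCOPPQSEF reports FE or OE,
	 * the registration did not take effect (the queue was full, or
	 * another CPU currently holds the command registers), so retry the
	 * whole registration sequence until it is accepted.
	 */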
	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}

static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}

static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/*
		 * The size is so large that the ALIGN() below would overflow;
		 * perform the cache operation on the entire range instead.
		 */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, data->line_size);
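	/*
	 * Worked example (hypothetical numbers): with line_size = 128,
	 * start = 0x1010 and end = 0x1090, start is rounded down to 0x1000,
	 * size becomes 0x90 and is then rounded up to 0x100, so both
	 * partially covered cache lines are included in the operation.
	 */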

	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					      UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

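	/*
	 * Turning the cache on also enables write-through gathering (WTG)
	 * and pre-fetching (PRD); burst read (BST) and instruction/data
	 * separation (ACT) are left disabled.
	 */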
	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_active_ways(
					struct uniphier_cache_data *data)
{
	unsigned int cpu;

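	/*
	 * Each possible CPU has its own active way control register; they are
	 * laid out consecutively, 4 bytes apart, starting at way_ctrl_base.
	 */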
	for_each_possible_cpu(cpu)
		writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_active_ways(data);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{ .compatible = "socionext,uniphier-system-cache" },
	{ /* sentinel */ }
};

static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

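	/*
	 * The number of ways is cache-size / (cache-sets * cache-line-size).
	 * Hypothetical example: a 512 KiB cache with 512 sets and 128-byte
	 * lines has 8 ways, so way_mask becomes GENMASK(7, 0) = 0xff.
	 */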
	data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
				 0);

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

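	/*
	 * 0xc00 is the default offset of the active way control registers;
	 * some older SoC revisions use a different offset and override this
	 * below.
	 */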
	data->way_ctrl_base = data->ctrl_base + 0xc00;

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of range operation is limited to (1 << 22) or less
		 * for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;

		/*
		 * Unfortunately, the offset address of active way control base
		 * varies from SoC to SoC.
		 */
		switch (revision) {
		case 0x11:	/* sLD3 */
			data->way_ctrl_base = data->ctrl_base + 0x870;
			break;
		case 0x12:	/* LD4 */
		case 0x16:	/* sld8 */
			data->way_ctrl_base = data->ctrl_base + 0x840;
			break;
		default:
			break;
		}
	}

	data->range_op_max_size -= data->line_size;

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list);	/* no mutex */

	/*
	 * OK, this level has been successfully initialized. Look for the next
	 * level cache. Do not roll back even if the initialization of the
	 * next level cache fails because we want to continue with available
	 * cache levels.
	 */
	next_np = of_find_next_cache_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;
err:
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}

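/**
 * uniphier_cache_init - set up the UniPhier outer cache(s)
 *
 * Looks up the level 2 "socionext,uniphier-system-cache" node in the device
 * tree, initializes it and any further levels reachable via
 * of_find_next_cache_node(), installs the outer_cache callbacks, and enables
 * the caches.
 *
 * Return: 0 on success, or if only an optional level (L3 or outer) failed to
 * initialize; a negative error code if the L2 cache could not be set up.
 */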
int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);

	if (ret) {
		/*
		 * Error out only if L2 initialization fails. Continue despite
		 * errors on L3 or outer levels, because they are optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}