/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	raw_spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};
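
/*
 * Note: this table is scanned linearly and relies on being sorted
 * from largest to smallest; both pmb_bolt_mapping() and
 * pmb_remap_caller() pick the first (i.e. largest) size that fits.
 */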

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
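
/*
 * Each PMB slot is programmed through a pair of memory-mapped array
 * entries: an address entry (off PMB_ADDR) holding the VPN and valid
 * bit, and a data entry (off PMB_DATA) holding the PPN plus the
 * size/caching flags. mk_pmb_entry() computes the per-slot offset
 * that is OR'd into either array base, so (slot number illustrative):
 *
 *	__raw_writel(vpn | PMB_V, mk_pmb_addr(3));
 *	__raw_writel(ppn | flags | PMB_V, mk_pmb_data(3));
 *
 * programs slot 3, exactly as __set_pmb_entry() does below.
 */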

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

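/*
 * Find a free slot in the PMB. Called with pmb_rwlock held for
 * writing (see pmb_alloc()), which is what makes the non-atomic
 * __set_bit() below safe.
 */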
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

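/*
 * Invalidation only needs to clear the valid bits. Note that
 * writel_uncached() performs the uncached jump internally, so unlike
 * __set_pmb_entry() there is no explicit jump_to_uncached() here.
 */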
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

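/*
 * Set up a bolted (kernel-lifetime) mapping of at least 16MB. The
 * request is split greedily across the supported entry sizes and the
 * resulting entries are chained through ->link; for example, a 192MB
 * request decomposes into one 128MB entry plus one 64MB entry, while
 * 48MB becomes three linked 16MB entries.
 *
 * Returns 0 on success or a negative errno on failure.
 */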
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys += pmbe->size;
			vaddr += pmbe->size;
			size -= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

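/*
 * PMB-backed ioremap(). This path is only taken when the kernel is
 * booted with "pmb=iomap" (see early_pmb() below); otherwise the
 * caller (see arch/sh/mm/ioremap.c) falls back to page table based
 * mappings. The physical range is rounded out to a multiple of the
 * chosen PMB entry size before being bolted.
 */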
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
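
	/*
	 * Worked example (values illustrative): phys = 0x41800000 with
	 * size = SZ_16M selects the 16MB entry size, giving
	 * offset = 0x800000, phys rounded down to 0x41000000, and
	 * aligned = 32MB, so the rounded mapping still covers the
	 * whole requested range.
	 */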

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

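/*
 * Tear down up to @depth entries of a compound mapping, following the
 * ->link chain from @pmbe. Callers that want the entire chain gone
 * pass NR_PMB_ENTRIES. Must be called with pmb_rwlock held for
 * writing.
 */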
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

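/*
 * Try to fold a compound mapping into a single larger entry. For
 * example, four contiguous 128MB boot mappings chained through
 * ->link span 512MB, which is itself a valid PMB size: the head
 * entry is grown to 512MB and the three tail entries are torn down.
 */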
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

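/*
 * Command line parsing: booting with "pmb=iomap" opts in to
 * PMB-backed ioremap() via pmb_remap_caller() above.
 */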
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

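/*
 * PASCR.SE enables the 32-bit "space extension" (PMB) mode; if it
 * reads back clear, the CPU is still running with the legacy 29-bit
 * physical address space.
 */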
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

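/*
 * debugfs dump of the raw hardware PMB state; with debugfs mounted
 * this typically shows up as <debugfs>/sh/pmb (via arch_debugfs_dir).
 */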
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety vpn ppn size flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	debugfs_create_file("pmb", S_IFREG | S_IRUGO, arch_debugfs_dir, NULL,
			    &pmb_debugfs_fops);
	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
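/*
 * The hardware PMB state may not survive a suspend/resume cycle, so
 * on resume every entry still tracked in software is written back
 * into its hardware slot.
 */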
static void pmb_syscore_resume(void)
{
	struct pmb_entry *pmbe;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			set_pmb_entry(pmbe);
		}
	}

	read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
	.resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
	register_syscore_ops(&pmb_syscore_ops);
	return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif