^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/numa.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/lppaca.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/paca.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/svm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/ultravisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/rtas.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "setup.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
#ifndef CONFIG_SMP
#define boot_cpuid 0	/* UP build: the (only) boot cpu is logical cpu 0 */
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
/*
 * Allocate early per-cpu paca data from memblock, below @limit, on
 * @cpu's NUMA node when that mapping is already available.  Panics on
 * failure, so callers never see NULL.
 */
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
				    unsigned long limit, int cpu)
{
	void *ptr;
	int nid;

	/*
	 * boot_cpuid paca is allocated very early before cpu_to_node is up.
	 * Set bottom-up mode, because the boot CPU should be on node-0,
	 * which will put its paca in the right place.
	 */
	if (cpu == boot_cpuid) {
		nid = NUMA_NO_NODE;
		memblock_set_bottom_up(true);
	} else {
		nid = early_cpu_to_node(cpu);
	}

	ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				     limit, nid);
	if (!ptr)
		panic("cannot allocate paca data");

	/* Restore the default top-down allocation policy for later callers. */
	if (cpu == boot_cpuid)
		memblock_set_bottom_up(false);

	return ptr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #ifdef CONFIG_PPC_PSERIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
#define LPPACA_SIZE 0x400	/* 1kB slot per lppaca; see init_lppaca() for the constraints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
/*
 * Allocator for lppacas of a secure (SVM) guest, which must be shared
 * with the hypervisor page-by-page.  On the first call a single
 * page-aligned pool sized for all of nr_cpu_ids is allocated and shared
 * via the ultravisor; subsequent calls hand out successive @size-byte
 * chunks of that pool.  @cpu is currently unused — chunks are handed
 * out in call order.
 */
static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
					int cpu)
{
	size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
	static unsigned long shared_lppaca_size;
	static void *shared_lppaca;
	void *ptr;

	if (!shared_lppaca) {
		memblock_set_bottom_up(true);

		/*
		 * See Documentation/powerpc/ultravisor.rst for more details.
		 *
		 * UV/HV data sharing is in PAGE_SIZE granularity. In order to
		 * minimize the number of pages shared, align the allocation to
		 * PAGE_SIZE.
		 */
		shared_lppaca =
			memblock_alloc_try_nid(shared_lppaca_total_size,
					       PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
					       limit, NUMA_NO_NODE);
		if (!shared_lppaca)
			panic("cannot allocate shared data");

		memblock_set_bottom_up(false);
		uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
			      shared_lppaca_total_size >> PAGE_SHIFT);
	}

	/* Bump-allocate the next @size bytes out of the shared pool. */
	ptr = shared_lppaca + shared_lppaca_size;
	shared_lppaca_size += size;

	/*
	 * This is very early in boot, so no harm done if the kernel crashes at
	 * this point.
	 */
	BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

	return ptr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * See asm/lppaca.h for more detail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * lppaca structures must must be 1kB in size, L1 cache line aligned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * and not cross 4kB boundary. A 1kB size and 1kB alignment will satisfy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * these requirements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static inline void init_lppaca(struct lppaca *lppaca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) BUILD_BUG_ON(sizeof(struct lppaca) != 640);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) *lppaca = (struct lppaca) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) .desc = cpu_to_be32(0xd397d781), /* "LpPa" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) .size = cpu_to_be16(LPPACA_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) .fpregs_in_use = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) .slb_count = cpu_to_be16(64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) .vmxregs_in_use = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) .page_ins = 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct lppaca *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) BUILD_BUG_ON(sizeof(struct lppaca) > LPPACA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) if (early_cpu_has_feature(CPU_FTR_HVMODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if (is_secure_guest())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) init_lppaca(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #endif /* CONFIG_PPC_PSERIES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
/*
 * 3 persistent SLBs are allocated here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
{
	struct slb_shadow *s;

	if (cpu != boot_cpuid) {
		/*
		 * Boot CPU comes here before early_radix_enabled
		 * is parsed (e.g., for disable_radix). So allocate
		 * always and this will be fixed up in free_unused_pacas.
		 */
		if (early_radix_enabled())
			return NULL;
	}

	/* alloc_paca_data() panics on failure, so s is never NULL here. */
	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);

	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
	s->buffer_length = cpu_to_be32(sizeof(*s));

	return s;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #endif /* CONFIG_PPC_BOOK3S_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) #ifdef CONFIG_PPC_PSERIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * new_rtas_args() - Allocates rtas args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * @cpu: CPU number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * @limit: Memory limit for this allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * Allocates a struct rtas_args and return it's pointer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * if not in Hypervisor mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * Return: Pointer to allocated rtas_args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * NULL if CPU in Hypervisor Mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (early_cpu_has_feature(CPU_FTR_HVMODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) limit, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) #endif /* CONFIG_PPC_PSERIES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
/* The Paca is an array with one entry per processor. Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor. The Paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors. The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;	/* indexed by logical cpu; entries filled by allocate_paca() */
EXPORT_SYMBOL(paca_ptrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
/*
 * Initialise the fixed fields of @new_paca for logical cpu @cpu.
 * Satellite pointers (lppaca, slb_shadow, rtas args) start out NULL
 * here and are filled in afterwards by allocate_paca().
 */
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
	new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
	new_paca->kernel_pgd = swapper_pg_dir;
#endif
	new_paca->lock_token = 0x8000;
	new_paca->paca_index = cpu;
	new_paca->kernel_toc = kernel_toc_addr();
	new_paca->kernelbase = (unsigned long) _stext;
	/* Only set MSR:IR/DR when MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	/* 0xffff = hardware cpu id not known yet — presumably set during bringup */
	new_paca->hw_cpu_id = 0xffff;
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
	/* poison pattern; real per-cpu data_offset assigned later — NOTE(review): confirm */
	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_BOOK3S_64
	new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
	/* For now -- if we have threads this will be adjusted later */
	new_paca->tcd_ptr = &new_paca->tcd;
#endif

#ifdef CONFIG_PPC_PSERIES
	new_paca->rtas_args_reentrant = NULL;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) /* Put the paca pointer into r13 and SPRG_PACA */
/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
	/* Setup r13 */
	local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
	/* On Book3E, initialize the TLB miss exception frames */
	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
	/*
	 * In HV mode, we setup both HPACA and PACA to avoid problems
	 * if we do a GET_PACA() before the feature fixups have been
	 * applied.
	 *
	 * Normally you should test against CPU_FTR_HVMODE, but CPU features
	 * are not yet set up when we first reach here.
	 */
	if (mfmsr() & MSR_HV)
		mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
	mtspr(SPRN_SPRG_PACA, local_paca);

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
/* Sizes recorded at allocation time, consumed by free_unused_pacas(). */
static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
/*
 * Allocate the paca pointer array, sized for the current nr_cpu_ids
 * (the unused tail is returned later by free_unused_pacas()).
 */
void __init allocate_paca_ptrs(void)
{
	paca_nr_cpu_ids = nr_cpu_ids;

	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
	if (!paca_ptrs)
		panic("Failed to allocate %d bytes for paca pointers\n",
		      paca_ptrs_size);

	/* 0x88 poison — presumably so use of an unallocated entry is easy to spot */
	memset(paca_ptrs, 0x88, paca_ptrs_size);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
/*
 * Allocate and initialise the paca, plus its satellite structures,
 * for logical cpu @cpu, and publish it in paca_ptrs[].
 */
void __init allocate_paca(int cpu)
{
	u64 limit;
	struct paca_struct *paca;

	BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
			       limit, cpu);
	paca_ptrs[cpu] = paca;

	initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
	paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
#ifdef CONFIG_PPC_PSERIES
	paca->rtas_args_reentrant = new_rtas_args(cpu, limit);
#endif
	/* Running total reported by free_unused_pacas() */
	paca_struct_size += sizeof(struct paca_struct);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
/*
 * Called once the final nr_cpu_ids is known: return the unused tail of
 * the paca pointer array to memblock, and undo the boot cpu's
 * unconditional slb_shadow allocation if radix ended up enabled
 * (see new_slb_shadow()).
 */
void __init free_unused_pacas(void)
{
	int new_ptrs_size;

	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	if (new_ptrs_size < paca_ptrs_size)
		memblock_free(__pa(paca_ptrs) + new_ptrs_size,
			      paca_ptrs_size - new_ptrs_size);

	paca_nr_cpu_ids = nr_cpu_ids;
	paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_BOOK3S_64
	if (early_radix_enabled()) {
		/* Ugly fixup, see new_slb_shadow() */
		memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
			      sizeof(struct slb_shadow));
		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
	}
#endif

	printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
	       paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
/*
 * Cache mm-context details of @mm in the current cpu's paca.
 * No-op on non-Book3S configurations.
 */
void copy_mm_to_paca(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_BOOK3S
	mm_context_t *context = &mm->context;

	get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
	VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
	get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
	/* Copy the low/high slice psize arrays wholesale into the paca. */
	memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
	       LOW_SLICE_ARRAY_SZ);
	memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
	       TASK_SLICE_ARRAY_SZ(context));
#else /* CONFIG_PPC_MM_SLICES */
	get_paca()->mm_ctx_user_psize = context->user_psize;
	get_paca()->mm_ctx_sllp = context->sllp;
#endif
#else /* !CONFIG_PPC_BOOK3S */
	return;
#endif
}