// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
		 (int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
		 (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

static inline notrace bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}

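/*
 * Worked example for slice_range_to_mask() below (an illustrative sketch,
 * assuming the 64-bit Book3S hash geometry: 256MB low slices below 4GB and
 * 1TB high slices above; other configurations use different SLICE_*_SHIFT
 * values):
 *
 *   start = 0x30000000 (768MB), len = 0x20000000 (512MB), end = 0x4fffffff
 *   GET_LOW_SLICE_INDEX(start) = 0x30000000 >> 28 = 3
 *   GET_LOW_SLICE_INDEX(end)   = 0x4fffffff >> 28 = 4
 *   low_slices = (1 << (4 + 1)) - (1 << 3) = 0x20 - 0x08 = 0x18
 *
 * i.e. bits 3 and 4 are set, marking the two 256MB slices the range touches.
 * A range crossing SLICE_LOW_TOP additionally sets bits in high_slices, one
 * bit per high slice.
 */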
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not at 0 */
	if (start == 0)
		start = (unsigned long)SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

static bool slice_check_range_fits(struct mm_struct *mm,
				   const struct slice_mask *available,
				   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_restore_bolted();
	local_irq_restore(flags);
#endif
}

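/*
 * Note on the layout used below: the low_slices/high_slices psize arrays
 * store one 4-bit page-size index per slice, two slices per byte.  A sketch
 * of the decoding, matching the loops in slice_convert() and
 * get_slice_psize(), for slice i:
 *
 *   index      = i >> 1;                            (which byte)
 *   mask_index = i & 0x1;                           (low or high nibble)
 *   psize      = (psizes[index] >> (mask_index * 4)) & 0xf;
 *
 * slice_convert() rewrites those nibbles and keeps the per-psize slice_mask
 * caches in sync, all under slice_convert_lock.
 */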
static void slice_convert(struct mm_struct *mm,
			  const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(&mm->context, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm_ctx_low_slices(&mm->context);
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm_ctx_high_slices(&mm->context);
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm_ctx_low_slices(&mm->context),
		  (unsigned long)mm_ctx_high_slices(&mm->context));

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start (end == 0) or end (end == 1) boundary of
 * that slice;
 * return a boolean indicating whether the slice is marked as available in
 * the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;
	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}

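/*
 * Both area finders below walk whole slices using slice_scan_available():
 * the bottom-up scan passes end == 1 so *boundary_addr is the end of the
 * slice containing addr, while the top-down scan probes addr - 1 with
 * end == 0 so *boundary_addr is the start of that slice.  Each maximal run
 * of available slices is then handed to vm_unmapped_area().
 */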
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Scan up to the maximum address allowed for this mmap request.
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. This only applies to
	 * requests for which high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
				   const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
				 const struct slice_mask *src1,
				 const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
				     const struct slice_mask *src1,
				     const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

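/*
 * Explanatory note (not new behaviour): MMU_PAGE_BASE is the kernel's base
 * page size, 64K or 4K depending on CONFIG_PPC_64K_PAGES. It is used in
 * slice_get_unmapped_area() below to decide whether a slice conversion also
 * needs an SLB flush on every CPU: after converting slices to a page size
 * larger than the base size, stale segment entries set up for the smaller
 * size must not be kept (see the on_each_cpu(slice_flush_segments, ...)
 * call).
 */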
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE MMU_PAGE_64K
#else
#define MMU_PAGE_BASE MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require the
		 * slice mask cache to be recalculated because it should
		 * already be initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = ALIGN(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to the good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, no conversion is
			 * needed, so return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 1);
}

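/*
 * Example of the nibble lookup done by get_slice_psize() below (an
 * illustrative sketch, assuming the 64-bit Book3S hash geometry where high
 * slices are 1TB): for addr = 0x18000000000 (1.5TB),
 * index = GET_HIGH_SLICE_INDEX(addr) = 1, so the page size lives in the
 * high nibble of high_slices byte 0:
 *
 *   psize = (psizes[1 >> 1] >> ((1 & 0x1) * 4)) & 0xf
 *         = (psizes[0] >> 4) & 0xf
 */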
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm_ctx_low_slices(&mm->context);
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm_ctx_high_slices(&mm->context);
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
	mm_ctx_set_user_psize(&mm->context, psize);

	/*
	 * Set all slice psizes to the default: two 4-bit psize nibbles
	 * per byte, hence the (psize << 4) | psize fill pattern.
	 */
	lpsizes = mm_ctx_low_slices(&mm->context);
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm_ctx_high_slices(&mm->context);
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(&mm->context, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
#endif

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm_ctx_user_psize(&mm->context);

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(&mm->context, psize);

	/* We need to account for 4k slices too */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif