// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 */
#include <linux/pgtable.h>
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
#include "boot.h"

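/* Random number generator modes, in ascending order of preference (see check_prng()). */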
#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG 3

struct prno_parm {
	u32 res;
	u32 reseed_counter;
	u64 stream_bytes;
	u8 V[112];
	u8 C[112];
};

struct prng_parm {
	u8 parm_block[32];
	u32 reseed_counter;
	u64 byte_counter;
};

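/*
 * Query the CPACF facilities and pick the best available random source:
 * a true RNG if present, otherwise the SHA-512 based DRNG, otherwise the
 * TDES based PRNG. Returns 0 (KASLR is then disabled) if the CPU has no
 * KMC-PRNG support at all.
 */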
static int check_prng(void)
{
	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
		return 0;
	}
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		return PRNG_MODE_TRNG;
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
		return PRNG_MODE_SHA512;
	else
		return PRNG_MODE_TDES;
}

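/*
 * Generate a random value in the range [0, limit - 1] and store it in
 * *value. Returns 0 on success or -1 if no usable random source is
 * available.
 */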
static int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
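		/*
		 * Stir the TOD-clock based seed into the parameter block by
		 * running 16 chained KMC-PRNG passes over the entropy buffer
		 * and feeding the result back into the parameter block.
		 */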
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		return -1;
	}
	*value = random % limit;
	return 0;
}

/*
 * To randomize the kernel base address we have to consider several facts:
 * 1. physical online memory might not be contiguous and might have holes.
 *    mem_detect info contains the list of online memory ranges we should
 *    consider.
 * 2. there are several occupied memory regions which we must not overlap
 *    and destroy. Currently safe_addr tells us the border below which all
 *    those occupied regions are. We are safe to use anything above
 *    safe_addr.
 * 3. an upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limitations are:
 *    3.1. the limit set by the "mem=" kernel command line option
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. the kernel base address must be aligned to THREAD_SIZE (the kernel
 *    stack size), which is required for CONFIG_CHECK_STACK. Currently
 *    THREAD_SIZE is 4 pages (16 pages when the kernel is built with kasan
 *    enabled).
 * Assumptions:
 * 1. kernel size (including .bss size) and the upper memory limit are page
 *    aligned.
 * 2. mem_detect memory region start is THREAD_SIZE aligned / end is
 *    PAGE_SIZE aligned (in practice the memory configuration granularity on
 *    z/VM and LPAR is 1MB).
 *
 * To guarantee a uniform distribution of the kernel base address among all
 * suitable addresses we generate a random value just once. For that we need
 * to build a contiguous range in which every value would be suitable. We
 * can build this range by simply counting all suitable addresses (let's
 * call them positions) which would be valid as kernel base address. To
 * count positions we iterate over the online memory ranges. For each range
 * which is big enough for the kernel image we count all suitable addresses
 * we can put the kernel image at, that is
 * (end - start - kernel_size) / THREAD_SIZE + 1
 * The two functions count_valid_kernel_positions and position_to_address
 * help to count the positions in a given memory range and then convert a
 * position back to an address.
 */
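/*
 * For illustration only (made-up numbers): with THREAD_SIZE = 16KB, a 64MB
 * online range and a 32MB kernel image there are
 * (64MB - 32MB) / 16KB + 1 = 2049 valid positions; position 1 maps back to
 * the range start, position 2049 to start + 32MB.
 */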
static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
						  unsigned long _min,
						  unsigned long _max)
{
	unsigned long start, end, pos = 0;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		pos += (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return pos;
}

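/*
 * Walk the same online memory ranges again and translate the 1-based
 * position back into a THREAD_SIZE aligned address within [_min, _max).
 * Returns 0 if pos is larger than the number of valid positions.
 */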
static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
					 unsigned long _min, unsigned long _max)
{
	unsigned long start, end;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
			return start + (pos - 1) * THREAD_SIZE;
		pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return 0;
}

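/*
 * Pick a random, THREAD_SIZE aligned base address for the decompressed
 * kernel above safe_addr and below the effective memory limit. Returns 0
 * if KASLR cannot be applied, in which case the kernel base address is not
 * randomized.
 */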
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long memory_limit = get_mem_detect_end();
	unsigned long base_pos, max_pos, kernel_size;
	unsigned long kasan_needs;

	if (memory_end_set)
		memory_limit = min(memory_limit, memory_end);

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	if (IS_ENABLED(CONFIG_KASAN)) {
		/*
		 * Estimate kasan memory requirements, which it will reserve
		 * at the very end of available physical memory. To estimate
		 * that, we take into account that kasan would require
		 * 1/8 of available physical memory (for shadow memory) +
		 * creating page tables for the whole memory + shadow memory
		 * region (1 + 1/8). To keep page tables estimates simple take
		 * the double of combined ptes size.
		 */
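		/*
		 * For illustration only (assuming 4KB pages and 2KB page
		 * tables with 256 entries, i.e. PAGE_SIZE = 4096,
		 * _PAGE_ENTRIES = 256, _PAGE_TABLE_SIZE = 2048): with 1GB of
		 * usable memory the shadow estimate is 128MB and the page
		 * table estimate is (1GB + 128MB) / 4096 / 256 * 2048 * 2
		 * = 4.5MB, so roughly 132.5MB are subtracted from
		 * memory_limit.
		 */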
		memory_limit = get_mem_detect_end();
		if (memory_end_set && memory_limit > memory_end)
			memory_limit = memory_end;

		/* for shadow memory */
		kasan_needs = memory_limit / 8;
		/* for paging structures */
		kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
			       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
		memory_limit -= kasan_needs;
	}

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
	if (safe_addr + kernel_size > memory_limit)
		return 0;

	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
	if (!max_pos) {
		sclp_early_printk("KASLR disabled: not enough memory\n");
		return 0;
	}

	/* we need a value in the range [1, max_pos] inclusive */
	if (get_random(max_pos, &base_pos))
		return 0;
	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
}