^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * s390 arch random implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright IBM Corp. 2017, 2018
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author(s): Harald Freudenberger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * The s390_arch_random_generate() function may be called from random.c
 * in interrupt context. So this implementation does its best to be very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * fast. There is a buffer of random data which is asynchronously checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * and filled by a workqueue thread.
 * If there are enough bytes in the buffer, s390_arch_random_generate()
 * just delivers these bytes. Otherwise false is returned until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * worker thread refills the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * The worker fills the rng buffer by pulling fresh entropy from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * high quality (but slow) true hardware random generator. This entropy
 * is then spread over the buffer with a pseudo random generator (PRNG).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * As the arch_get_random_seed_long() fetches 8 bytes and the calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * function add_interrupt_randomness() counts this as 1 bit entropy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * distribution needs to make sure there is in fact 1 bit entropy contained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * in 8 bytes of the buffer. The current values pull 32 byte entropy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * and scatter this into a 2048 byte buffer. So 8 byte in the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * will contain 1 bit of entropy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * The worker thread is rescheduled based on the charge level of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * buffer but at least with 500 ms delay to avoid too much CPU consumption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * So the max. amount of rng data delivered via arch_get_random_seed is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * limited to 4k bytes per second.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/static_key.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/cpacf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
/*
 * Enabled once at init time when the required CPACF PRNO subfunctions
 * are available and the rng buffer could be allocated; gates the arch
 * random interface for the rest of the kernel.
 */
DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);

/* Total number of random bytes delivered via s390_arch_random_generate(). */
atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
EXPORT_SYMBOL(s390_arch_random_counter);

#define ARCH_REFILL_TICKS (HZ/2)	/* minimum worker reschedule delay */
#define ARCH_PRNG_SEED_SIZE 32		/* TRNG entropy bytes pulled per refill */
#define ARCH_RNG_BUF_SIZE 2048		/* size of the rng byte buffer */

/* Protects arch_rng_buf and arch_rng_buf_idx. */
static DEFINE_SPINLOCK(arch_rng_lock);
/* Buffer of PRNG output, refilled asynchronously by the worker. */
static u8 *arch_rng_buf;
/*
 * Count of unconsumed bytes left in arch_rng_buf. NOTE: this index is
 * deliberately allowed to wrap below zero (unsigned underflow) by the
 * consumer; a value > ARCH_RNG_BUF_SIZE is the "buffer exhausted"
 * signal that the refill worker tests for.
 */
static unsigned int arch_rng_buf_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
/*
 * Deliver nbytes of random data from the prefilled rng buffer.
 *
 * May be called from interrupt context, so this function never blocks:
 * if the buffer lock is contended or the buffer does not hold enough
 * bytes, false is returned and the caller has to retry later, after
 * the worker has refilled the buffer.
 *
 * Returns true with buf filled on success, false otherwise.
 */
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
	/* max hunk is ARCH_RNG_BUF_SIZE */
	if (nbytes > ARCH_RNG_BUF_SIZE)
		return false;

	/* lock rng buffer; trylock only - must not spin in irq context */
	if (!spin_trylock(&arch_rng_lock))
		return false;

	/*
	 * Try to resolve the requested amount of bytes from the buffer.
	 * The subtraction deliberately underflows (unsigned wraparound)
	 * when fewer than nbytes remain, making the range check below
	 * fail.
	 */
	arch_rng_buf_idx -= nbytes;
	if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
		memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
		atomic64_add(nbytes, &s390_arch_random_counter);
		spin_unlock(&arch_rng_lock);
		return true;
	}

	/*
	 * Not enough bytes in the rng buffer. The wrapped (huge) index
	 * is intentionally left in place: the refill worker detects
	 * exhaustion by testing for idx > ARCH_RNG_BUF_SIZE. Refill is
	 * done asynchronously.
	 */
	spin_unlock(&arch_rng_lock);

	return false;
}
EXPORT_SYMBOL(s390_arch_random_generate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
/*
 * Delayed-work handler: refill the rng buffer when it is exhausted and
 * reschedule itself.
 *
 * An exhausted buffer is signalled by arch_rng_buf_idx having wrapped
 * to a value > ARCH_RNG_BUF_SIZE (see s390_arch_random_generate()).
 * On refill, ARCH_PRNG_SEED_SIZE bytes of true entropy are pulled from
 * the TRNG and expanded to ARCH_RNG_BUF_SIZE bytes with the SHA-512
 * based DRNG. The reschedule delay scales with the buffer charge
 * level, but is at least ARCH_REFILL_TICKS, limiting CPU consumption
 * and the rng output rate.
 */
static void arch_rng_refill_buffer(struct work_struct *unused)
{
	unsigned int delay = ARCH_REFILL_TICKS;

	spin_lock(&arch_rng_lock);
	if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
		/* buffer is exhausted and needs refill */
		u8 seed[ARCH_PRNG_SEED_SIZE];
		/* SHA-512 DRNG parameter block - presumably 240 bytes per
		 * the CPACF specification; verify against the PoP. */
		u8 prng_wa[240];
		/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
		cpacf_trng(NULL, 0, seed, sizeof(seed));
		/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
		memset(prng_wa, 0, sizeof(prng_wa));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
			   &prng_wa, NULL, 0, seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
			   &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
		/* buffer is full again; reset the consumption index */
		arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
	}
	/* the fuller the buffer, the longer until the next check */
	delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
	spin_unlock(&arch_rng_lock);

	/* kick next check */
	queue_delayed_work(system_long_wq, &arch_rng_work, delay);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) static int __init s390_arch_random_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /* all the needed PRNO subfunctions available ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /* alloc arch random working buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) if (!arch_rng_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /* kick worker queue job to fill the random buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) queue_delayed_work(system_long_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) &arch_rng_work, ARCH_REFILL_TICKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /* enable arch random to the outside world */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) static_branch_enable(&s390_arch_random_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) arch_initcall(s390_arch_random_init);