^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Access kernel memory without faulting -- s390 specific implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright IBM Corp. 2009, 2015
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/ctl_reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/stacktrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
/*
 * s390_kernel_write_odd - write up to 8 bytes to kernel memory, bypassing
 * DAT page protection
 * @dst: destination kernel address (any alignment)
 * @src: source address
 * @size: number of bytes the caller still wants written
 *
 * Writes at most the bytes up to the next eight-byte boundary of @dst,
 * i.e. min(8 - (dst & 7), size) bytes, and returns how many were written.
 * Uses a read-modify-write of the aligned doubleword containing @dst:
 * the doubleword is read into a temporary buffer, the requested bytes
 * are patched in, and the result is stored back with sturg, which stores
 * via the real address and therefore ignores DAT write protection.
 */
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;	/* doubleword containing dst */
	offset = (unsigned long) dst & 7UL;	/* byte offset of dst within it */
	size = min(8UL - offset, size);		/* never cross the boundary */
	count = size - 1;			/* ex length code = bytes - 1 */
	asm volatile(
		/* bras branches to 0f and leaves the address of the mvc
		 * template below in register 1, for use by "ex". */
		" bras 1,0f\n"
		/* Template only - skipped in the straight-line path; when
		 * run via "ex" with its length patched to %1 it copies
		 * count + 1 bytes from src to &tmp[offset]. */
		" mvc 0(1,%4),0(%5)\n"
		/* Read the aligned doubleword at dst into tmp. */
		"0: mvc 0(8,%3),0(%0)\n"
		/* Execute the template: patch the requested bytes into tmp. */
		" ex %1,0(1)\n"
		/* Load the merged doubleword, ... */
		" lg %1,0(%3)\n"
		/* ... translate the aligned virtual address to real, ... */
		" lra %0,0(%0)\n"
		/* ... and store it back bypassing DAT protection. */
		" sturg %1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * s390_kernel_write - write to kernel memory bypassing DAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * @dst: destination address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * @src: source address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * @size: number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * This function writes to kernel memory bypassing DAT and possible page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * write protection. It writes to the destination using the sturg instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * Therefore we have a read-modify-write sequence: the function reads eight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * bytes from destination at an eight byte boundary, modifies the bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * requested and writes the result back in a loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) static DEFINE_SPINLOCK(s390_kernel_write_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) void *tmp = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) long copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) spin_lock_irqsave(&s390_kernel_write_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if (!(flags & PSW_MASK_DAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) memcpy(dst, src, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) while (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) copied = s390_kernel_write_odd(tmp, src, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) tmp += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) src += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) size -= copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) return dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
/*
 * __memcpy_real - low level copy; caller must have DAT disabled
 * @dest: destination (real) address
 * @src: source (real) address
 * @count: number of bytes to copy
 *
 * Copies with mvcle; "jo 0b" loops while the instruction reports partial
 * completion (condition code 3). mvcle requires even/odd register pairs
 * (address in the even, length in the odd register), hence the hard-coded
 * registers 2/3 and 4/5. On success the lhi sets rc to 0; if mvcle faults,
 * the EX_TABLE entry resumes execution at label 2 and rc keeps its initial
 * -EFAULT.
 */
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0: mvcle %1,%2,0x0\n"
		"1: jo 0b\n"
		" lhi %0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
/*
 * _memcpy_real - copy memory with DAT switched off
 * @dest: destination address
 * @src: source address
 * @count: number of bytes to copy
 *
 * Disables interrupts, switches DAT off, performs the copy with
 * __memcpy_real() and then restores DAT (only if it was enabled before)
 * and the saved interrupt state. The trace_hardirqs_off/on calls keep
 * lockdep's notion of the irq state in sync when we actually change it.
 *
 * Returns 0 on success, -EFAULT (converted to unsigned long) on fault.
 */
static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8); // disable DAT
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04); // re-enable DAT only if it was on
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * Copy memory in real mode (kernel to kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) int memcpy_real(void *dest, void *src, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if (S390_lowcore.nodat_stack != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) dest, src, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * This is a really early memcpy_real call, the stacks are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * not set up yet. Just call _memcpy_real on the early boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return _memcpy_real((unsigned long) dest,(unsigned long) src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) (unsigned long) count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * Copy memory in absolute mode (kernel to kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);		/* save control register 0 */
	__ctl_clear_bit(0, 28);		/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		/*
		 * Zero the prefix register for the duration of the copy so
		 * that low absolute addresses are not redirected into the
		 * prefix area. Machine checks are disabled meanwhile since
		 * the lowcore contents are not valid with a zero prefix.
		 */
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		/* prefix already zero: absolute and real addresses match */
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);		/* restore lowcore protection */
	arch_local_irq_restore(flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * Copy memory from kernel (real) to user (virtual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) int copy_to_user_real(void __user *dest, void *src, unsigned long count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) int offs = 0, size, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) buf = (char *) __get_free_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) while (offs < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) size = min(PAGE_SIZE, count - offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (memcpy_real(buf, src + offs, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) if (copy_to_user(dest + offs, buf, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) offs += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) free_page((unsigned long) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * Check if physical address is within prefix or zero page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) static int is_swapped(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) unsigned long lc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) if (addr < sizeof(struct lowcore))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) lc = (unsigned long) lowcore_ptr[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * Convert a physical pointer for /dev/mem access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * For swapped prefix pages a new buffer is returned that contains a copy of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * the absolute memory. The buffer size is maximum one page large.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) void *xlate_dev_mem_ptr(phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) void *bounce = (void *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) if (is_swapped(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) size = PAGE_SIZE - (addr & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) bounce = (void *) __get_free_page(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (bounce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) memcpy_absolute(bounce, (void *) addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) put_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return bounce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * Free converted buffer for /dev/mem access (if necessary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if ((void *) addr != buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) free_page((unsigned long) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }