^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/drivers/char/mem.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1991, 1992 Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Added devfs support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/miscdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/raw.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/tty.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/capability.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/backing-dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/shmem_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/splice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/pfn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/security.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/pseudo_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <uapi/linux/magic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #ifdef CONFIG_IA64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) # include <linux/efi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define DEVMEM_MINOR 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define DEVPORT_MINOR 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) static inline unsigned long size_inside_page(unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) unsigned long sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) return min(sz, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) return addr + count <= __pa(high_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #ifdef CONFIG_STRICT_DEVMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static inline int page_is_allowed(unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return devmem_is_allowed(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) static inline int range_is_allowed(unsigned long pfn, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) u64 from = ((u64)pfn) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) u64 to = from + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) u64 cursor = from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) while (cursor < to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (!devmem_is_allowed(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) cursor += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) pfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) static inline int page_is_allowed(unsigned long pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static inline int range_is_allowed(unsigned long pfn, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #ifndef unxlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) static inline bool should_stop_iteration(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) return fatal_signal_pending(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * This funcion reads the *physical* memory. The f_pos points directly to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * memory location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static ssize_t read_mem(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) phys_addr_t p = *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) ssize_t read, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) char *bounce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) if (p != *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) if (!valid_phys_addr_range(p, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) /* we don't have page 0 mapped on sparc and m68k.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) if (p < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) if (sz > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) if (clear_user(buf, sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) read += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (!bounce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) while (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) unsigned long remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) int allowed, probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) allowed = page_is_allowed(p >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) if (!allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if (allowed == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) /* Show zeros for restricted memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) remaining = clear_user(buf, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * On ia64 if a page has been mapped somewhere as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * uncached, then it must also be accessed uncached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * by the kernel or data corruption may occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) ptr = xlate_dev_mem_ptr(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) probe = copy_from_kernel_nofault(bounce, ptr, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) unxlate_dev_mem_ptr(p, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) if (probe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) remaining = copy_to_user(buf, bounce, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (remaining)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) read += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (should_stop_iteration())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) kfree(bounce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) *ppos += read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) return read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) kfree(bounce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static ssize_t write_mem(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) phys_addr_t p = *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) ssize_t written, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) unsigned long copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) if (p != *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) return -EFBIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) if (!valid_phys_addr_range(p, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) /* we don't have page 0 mapped on sparc and m68k.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) if (p < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) /* Hmm. Do something? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) written += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) while (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) int allowed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) allowed = page_is_allowed(p >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (!allowed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) /* Skip actual writing when a page is marked as restricted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) if (allowed == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * On ia64 if a page has been mapped somewhere as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * uncached, then it must also be accessed uncached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * by the kernel or data corruption may occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) ptr = xlate_dev_mem_ptr(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (!ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (written)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) copied = copy_from_user(ptr, buf, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) unxlate_dev_mem_ptr(p, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (copied) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) written += sz - copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) if (written)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) written += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (should_stop_iteration())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) *ppos += written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) int __weak phys_mem_access_prot_allowed(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * Architectures vary in how they handle caching for addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * outside of main memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) #ifdef pgprot_noncached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) static int uncached_access(struct file *file, phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) #if defined(CONFIG_IA64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * On ia64, we ignore O_DSYNC because we cannot tolerate memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * attribute aliases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) #elif defined(CONFIG_MIPS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) extern int __uncached_access(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) unsigned long addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) return __uncached_access(file, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * Accessing memory above the top the kernel knows about or through a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * file pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * that was marked O_DSYNC will be done non-cached.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) if (file->f_flags & O_DSYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) return addr >= __pa(high_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) unsigned long size, pgprot_t vma_prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) #ifdef pgprot_noncached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) phys_addr_t offset = pfn << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (uncached_access(file, offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return pgprot_noncached(vma_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) return vma_prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) static unsigned long get_unmapped_area_mem(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) unsigned long pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (!valid_mmap_phys_addr_range(pgoff, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) return (unsigned long) -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) return pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) /* permit direct mmap, for read, write or exec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) static unsigned memory_mmap_capabilities(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) return NOMMU_MAP_DIRECT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static unsigned zero_mmap_capabilities(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) return NOMMU_MAP_COPY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /* can't do an in-place private mapping if there's no MMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) static inline int private_mapping_ok(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) return vma->vm_flags & VM_MAYSHARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static inline int private_mapping_ok(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) static const struct vm_operations_struct mmap_mem_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) #ifdef CONFIG_HAVE_IOREMAP_PROT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) .access = generic_access_phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) static int mmap_mem(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) size_t size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /* Does it even fit in phys_addr_t? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (offset >> PAGE_SHIFT != vma->vm_pgoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) /* It's illegal to wrap around the end of the physical address space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) if (offset + (phys_addr_t)size - 1 < offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (!private_mapping_ok(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) if (!range_is_allowed(vma->vm_pgoff, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) &vma->vm_page_prot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) vma->vm_ops = &mmap_mem_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) /* Remap-pfn-range will mark the range VM_IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (remap_pfn_range(vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) vma->vm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) vma->vm_pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) vma->vm_page_prot)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) /* Turn a kernel-virtual address into a physical page frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) * RED-PEN: on some architectures there is more mapped memory than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * available in mem_map which pfn_valid checks for. Perhaps should add a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * new macro here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * RED-PEN: vmalloc is not supported right now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (!pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) vma->vm_pgoff = pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) return mmap_mem(file, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * This function reads the *virtual* memory as seen by the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) static ssize_t read_kmem(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) unsigned long p = *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) ssize_t low_count, read, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (p < (unsigned long) high_memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) low_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (count > (unsigned long)high_memory - p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) low_count = (unsigned long)high_memory - p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) /* we don't have page 0 mapped on sparc and m68k.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (p < PAGE_SIZE && low_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) sz = size_inside_page(p, low_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (clear_user(buf, sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) read += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) low_count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) while (low_count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) sz = size_inside_page(p, low_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * On ia64 if a page has been mapped somewhere as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * uncached, then it must also be accessed uncached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * by the kernel or data corruption may occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) kbuf = xlate_dev_kmem_ptr((void *)p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) if (!virt_addr_valid(kbuf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) if (copy_to_user(buf, kbuf, sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) read += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) low_count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (should_stop_iteration()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) kbuf = (char *)__get_free_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) if (!kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) while (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (!is_vmalloc_or_module_addr((void *)p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) sz = vread(kbuf, (char *)p, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) if (!sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) if (copy_to_user(buf, kbuf, sz)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) read += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (should_stop_iteration())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) free_page((unsigned long)kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) *ppos = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) return read ? read : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) ssize_t written, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) unsigned long copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) /* we don't have page 0 mapped on sparc and m68k.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) if (p < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) /* Hmm. Do something? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) written += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) while (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * On ia64 if a page has been mapped somewhere as uncached, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) * it must also be accessed uncached by the kernel or data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) * corruption may occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) ptr = xlate_dev_kmem_ptr((void *)p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (!virt_addr_valid(ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) copied = copy_from_user(ptr, buf, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) if (copied) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) written += sz - copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (written)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) written += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (should_stop_iteration())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) *ppos += written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * This function writes to the *virtual* memory as seen by the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) static ssize_t write_kmem(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) unsigned long p = *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) ssize_t wrote = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ssize_t virtr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (p < (unsigned long) high_memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) unsigned long to_write = min_t(unsigned long, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) (unsigned long)high_memory - p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) wrote = do_write_kmem(p, buf, to_write, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (wrote != to_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) return wrote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) p += wrote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) buf += wrote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) count -= wrote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) kbuf = (char *)__get_free_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (!kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return wrote ? wrote : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) while (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) unsigned long sz = size_inside_page(p, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) unsigned long n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (!is_vmalloc_or_module_addr((void *)p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) err = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) n = copy_from_user(kbuf, buf, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) vwrite(kbuf, (char *)p, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) count -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) buf += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) virtr += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) p += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (should_stop_iteration())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) free_page((unsigned long)kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) *ppos = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) return virtr + wrote ? : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) static ssize_t read_port(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) unsigned long i = *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) char __user *tmp = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (!access_ok(buf, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) while (count-- > 0 && i < 65536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (__put_user(inb(i), tmp) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) tmp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) *ppos = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) return tmp-buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static ssize_t write_port(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) unsigned long i = *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) const char __user *tmp = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) if (!access_ok(buf, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) while (count-- > 0 && i < 65536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) char c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) if (__get_user(c, tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (tmp > buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) outb(c, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) tmp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) *ppos = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) return tmp-buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static ssize_t read_null(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) static ssize_t write_null(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) size_t count = iov_iter_count(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) iov_iter_advance(from, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct splice_desc *sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return sd->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) loff_t *ppos, size_t len, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) size_t written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) while (iov_iter_count(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) size_t chunk = iov_iter_count(iter), n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (chunk > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) chunk = PAGE_SIZE; /* Just for latency reasons */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) n = iov_iter_zero(chunk, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (!n && iov_iter_count(iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return written ? written : -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) written += n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return written ? written : -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static ssize_t read_zero(struct file *file, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) size_t cleared = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) size_t chunk = min_t(size_t, count, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) size_t left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) left = clear_user(buf + cleared, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (unlikely(left)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) cleared += (chunk - left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (!cleared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) cleared += chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) count -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return cleared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static int mmap_zero(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (vma->vm_flags & VM_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) return shmem_zero_setup(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) vma_set_anonymous(vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) static unsigned long get_unmapped_area_zero(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) unsigned long addr, unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) unsigned long pgoff, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (flags & MAP_SHARED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * mmap_zero() will call shmem_zero_setup() to create a file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * so use shmem's get_unmapped_area in case it can be huge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * and pass NULL for file as in mmap.c's get_unmapped_area(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * so as not to confuse shmem with our handle on "/dev/zero".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) static ssize_t write_full(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * Special lseek() function for /dev/null and /dev/zero. Most notably, you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * can fopen() both devices with "a" now. This was previously impossible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * -- SRB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static loff_t null_lseek(struct file *file, loff_t offset, int orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return file->f_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * The memory devices use the full 32/64 bits of the offset, and so we cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * check against negative addresses: they are ok. The return value is weird,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * though, in that case (0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * also note that seeking relative to the "end of file" isn't supported:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * it has no meaning, so it returns -EINVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) loff_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) inode_lock(file_inode(file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) switch (orig) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) case SEEK_CUR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) offset += file->f_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) case SEEK_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if ((unsigned long long)offset >= -MAX_ERRNO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ret = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) file->f_pos = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ret = file->f_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) force_successful_syscall_return();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) inode_unlock(file_inode(file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static struct inode *devmem_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) #ifdef CONFIG_IO_STRICT_DEVMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) void revoke_devmem(struct resource *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* pairs with smp_store_release() in devmem_init_inode() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct inode *inode = smp_load_acquire(&devmem_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * Check that the initialization has completed. Losing the race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * is ok because it means drivers are claiming resources before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * the fs_initcall level of init and prevent /dev/mem from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * establishing mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * The expectation is that the driver has successfully marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * the resource busy by this point, so devmem_is_allowed()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * should start returning false, however for performance this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * does not iterate the entire resource range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (devmem_is_allowed(PHYS_PFN(res->start)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) devmem_is_allowed(PHYS_PFN(res->end))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * *cringe* iomem=relaxed says "go ahead, what's the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * worst that can happen?"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static int open_port(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (!capable(CAP_SYS_RAWIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) rc = security_locked_down(LOCKDOWN_DEV_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (iminor(inode) != DEVMEM_MINOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Use a unified address space to have a single point to manage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * revocations when drivers want to take over a /dev/mem mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) inode->i_mapping = devmem_inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) filp->f_mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) #define zero_lseek null_lseek
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) #define full_lseek null_lseek
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) #define write_zero write_null
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) #define write_iter_zero write_iter_null
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) #define open_mem open_port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) #define open_kmem open_mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static const struct file_operations __maybe_unused mem_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) .llseek = memory_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) .read = read_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) .write = write_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) .mmap = mmap_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) .open = open_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) .get_unmapped_area = get_unmapped_area_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) .mmap_capabilities = memory_mmap_capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static const struct file_operations __maybe_unused kmem_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .llseek = memory_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .read = read_kmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .write = write_kmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .mmap = mmap_kmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) .open = open_kmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) .get_unmapped_area = get_unmapped_area_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) .mmap_capabilities = memory_mmap_capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static const struct file_operations null_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) .llseek = null_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) .read = read_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) .write = write_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) .read_iter = read_iter_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .write_iter = write_iter_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) .splice_write = splice_write_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static const struct file_operations __maybe_unused port_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) .llseek = memory_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .read = read_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .write = write_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) .open = open_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static const struct file_operations zero_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) .llseek = zero_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) .write = write_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) .read_iter = read_iter_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) .read = read_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) .write_iter = write_iter_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) .mmap = mmap_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) .get_unmapped_area = get_unmapped_area_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) .mmap_capabilities = zero_mmap_capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) static const struct file_operations full_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) .llseek = full_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) .read_iter = read_iter_zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) .write = write_full,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static const struct memdev {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) umode_t mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) const struct file_operations *fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) fmode_t fmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) } devlist[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) #ifdef CONFIG_DEVMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) #ifdef CONFIG_DEVKMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) [3] = { "null", 0666, &null_fops, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) #ifdef CONFIG_DEVPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) [4] = { "port", 0, &port_fops, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) [5] = { "zero", 0666, &zero_fops, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) [7] = { "full", 0666, &full_fops, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) [8] = { "random", 0666, &random_fops, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) [9] = { "urandom", 0666, &urandom_fops, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) #ifdef CONFIG_PRINTK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) [11] = { "kmsg", 0644, &kmsg_fops, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static int memory_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) const struct memdev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) minor = iminor(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (minor >= ARRAY_SIZE(devlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) dev = &devlist[minor];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (!dev->fops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) filp->f_op = dev->fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) filp->f_mode |= dev->fmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (dev->fops->open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return dev->fops->open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
/*
 * Initial file_operations for every MEM_MAJOR node.  memory_open()
 * replaces filp->f_op with the device-specific ops, so only .open
 * (and a no-op llseek) are needed here.
 */
static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static char *mem_devnode(struct device *dev, umode_t *mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (mode && devlist[MINOR(dev->devt)].mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *mode = devlist[MINOR(dev->devt)].mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
/* Device class under which all MEM_MAJOR nodes are created. */
static struct class *mem_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int devmem_fs_init_fs_context(struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
/* Internal, never user-mountable pseudo filesystem backing /dev/mem. */
static struct file_system_type devmem_fs_type = {
	.name		= "devmem",
	.owner		= THIS_MODULE,
	.init_fs_context = devmem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
/*
 * Pin the devmem pseudo filesystem and allocate the single anonymous
 * inode whose address_space tracks /dev/mem mappings (so they can be
 * revoked later).  Called once from chr_dev_init() when the /dev/mem
 * node is created.
 *
 * Returns 0 on success or a negative errno; on failure the pinned
 * filesystem reference is dropped again.
 */
static int devmem_init_inode(void)
{
	/* static: the mount is pinned once and kept for the system lifetime */
	static struct vfsmount *devmem_vfs_mount;
	static int devmem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
		simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
		return rc;
	}

	/*
	 * Publish /dev/mem initialized.
	 * Pairs with smp_load_acquire() in revoke_devmem().
	 * The release ordering guarantees readers that observe a non-NULL
	 * devmem_inode also see it fully initialized.
	 */
	smp_store_release(&devmem_inode, inode);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static int __init chr_dev_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) printk("unable to get major %d for memory devs\n", MEM_MAJOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) mem_class = class_create(THIS_MODULE, "mem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (IS_ERR(mem_class))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return PTR_ERR(mem_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) mem_class->devnode = mem_devnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (!devlist[minor].name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * Create /dev/port?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) NULL, devlist[minor].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return tty_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) fs_initcall(chr_dev_init);