// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

static DEFINE_MUTEX(mtd_mutex);

/*
 * Data structure to hold the pointer to the mtd device
 * as well as the mode information for the various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}

static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	mutex_unlock(&mtd_mutex);
	return 0;

out1:
	put_mtd_device(mtd);
out:
	mutex_unlock(&mtd_mutex);
	return ret;
} /* mtdchar_open */
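
/*
 * Note (illustrative, not from the original source): the character device
 * minors above are laid out in pairs, so /dev/mtd<N> (read-write) typically
 * has minor 2*N and /dev/mtd<N>ro (read-only) has minor 2*N+1, which is why
 * mtdchar_open() derives devnum as "minor >> 1" and rejects FMODE_WRITE when
 * the low bit is set. A minimal userspace sketch, assuming such nodes exist:
 *
 *	int fd = open("/dev/mtd0", O_RDWR);	// even minor: writable
 *	int ro = open("/dev/mtd0ro", O_RDONLY);	// odd minor: read-only view
 */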

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is low on memory or when memory
 * is highly fragmented, at the cost of reducing the performance of the
 * requested transfer due to the smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */
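
/*
 * Illustrative note (the magnitudes are assumptions, not taken from the
 * original source): mtd_kmalloc_up_to() may hand back a buffer smaller than
 * the requested transfer when a large contiguous allocation fails, in which
 * case "size" is updated accordingly and the read/write loops below simply
 * issue several smaller chunks. For example, a 1 MiB request served with a
 * 128 KiB bounce buffer becomes eight copy_{to,from}_user() plus
 * mtd_read()/mtd_write() iterations instead of one.
 */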

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */

static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
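
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * userspace tool switches an open mtd character device into OTP mode with
 * the OTPSELECT ioctl before reading or writing the protection registers,
 * e.g. roughly:
 *
 *	int mode = MTD_OTP_USER;	// or MTD_OTP_FACTORY, MTD_OTP_OFF
 *	if (ioctl(fd, OTPSELECT, &mode) == 0)
 *		nread = pread(fd, buf, sizeof(buf), 0);
 *
 * OTPSELECT is handled further below in mtdchar_ioctl(), which resets the
 * file position and calls otp_select_filemode().
 */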

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			    uint64_t start, uint32_t length, void __user *ptr,
			    uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			   uint64_t start, uint32_t length, void __user *ptr,
			   uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}

/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master mtd device can be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
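
/*
 * Usage sketch (illustrative, not from the original source): partitions can
 * be added to or deleted from a master mtd device at runtime through the
 * generic BLKPG ioctl, e.g. roughly:
 *
 *	struct blkpg_partition part = {
 *		.start = 0, .length = 0x20000, .pno = 0,
 *		.devname = "example",		// hypothetical partition name
 *	};
 *	struct blkpg_ioctl_arg arg = {
 *		.op = BLKPG_ADD_PARTITION, .data = &part,
 *	};
 *	ioctl(fd, BLKPG, &arg);
 *
 * CAP_SYS_ADMIN is required, and BLKPG_ADD_PARTITION is rejected on devices
 * that are themselves partitions.
 */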

static int mtdchar_write_ioctl(struct mtd_info *mtd,
			       struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	struct mtd_oob_ops ops = {};
	const void __user *usr_data, *usr_oob;
	int ret;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.mode = req.mode;
	ops.len = (size_t)req.len;
	ops.ooblen = (size_t)req.ooblen;
	ops.ooboffs = 0;

	if (usr_data) {
		ops.datbuf = memdup_user(usr_data, ops.len);
		if (IS_ERR(ops.datbuf))
			return PTR_ERR(ops.datbuf);
	} else {
		ops.datbuf = NULL;
	}

	if (usr_oob) {
		ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
		if (IS_ERR(ops.oobbuf)) {
			kfree(ops.datbuf);
			return PTR_ERR(ops.oobbuf);
		}
	} else {
		ops.oobbuf = NULL;
	}

	ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);

	kfree(ops.datbuf);
	kfree(ops.oobbuf);

	return ret;
}
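
/*
 * Usage sketch (illustrative, not from the original source): MEMWRITE lets
 * userspace write page data and OOB in one request, e.g. roughly:
 *
 *	struct mtd_write_req req = {
 *		.start = 0,				// byte offset on the device
 *		.len = mi.writesize,			// main-area bytes
 *		.ooblen = mi.oobsize,			// OOB bytes
 *		.usr_data = (uintptr_t)data_buf,
 *		.usr_oob = (uintptr_t)oob_buf,
 *		.mode = MTD_OPS_PLACE_OOB,		// or MTD_OPS_RAW / MTD_OPS_AUTO_OOB
 *	};
 *	ioctl(fd, MEMWRITE, &req);
 *
 * Here "mi", "data_buf" and "oob_buf" are hypothetical variables the caller
 * has already set up (see the MEMGETINFO sketch below).
 */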

static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode to require "dangerous" commands to have write
	 * permissions.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;
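
	/*
	 * Usage sketch (illustrative, not from the original source):
	 * MEMGETINFO is typically the first ioctl a tool issues, to learn
	 * the geometry it must respect, e.g.:
	 *
	 *	struct mtd_info_user mi;
	 *	if (ioctl(fd, MEMGETINFO, &mi) == 0)
	 *		printf("size %u erasesize %u writesize %u oobsize %u\n",
	 *		       mi.size, mi.erasesize, mi.writesize, mi.oobsize);
	 */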

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase) {
			ret = -ENOMEM;
		} else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
						   sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
						   sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}
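
	/*
	 * Usage sketch (illustrative, not from the original source): erasing
	 * a single eraseblock before rewriting it, using the geometry from
	 * the MEMGETINFO sketch above ("block_index" is hypothetical):
	 *
	 *	struct erase_info_user ei = {
	 *		.start = block_index * mi.erasesize,
	 *		.length = mi.erasesize,
	 *	};
	 *	ioctl(fd, MEMERASE, &ei);
	 *
	 * MEMERASE64 takes a struct erase_info_user64 for offsets beyond 4 GiB.
	 */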

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      buf.ptr, &buf_user->start);
		break;
	}
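
	/*
	 * Usage sketch (illustrative, not from the original source): reading
	 * the OOB area of one page. Note the quirk documented above: for the
	 * legacy MEMREADOOB the number of bytes actually read is written back
	 * into the "start" field of the user's struct, not "length":
	 *
	 *	unsigned char oob[64];			// assumes oobsize <= 64
	 *	struct mtd_oob_buf ob = {
	 *		.start = page_index * mi.writesize,	// page_index is hypothetical
	 *		.length = mi.oobsize,
	 *		.ptr = oob,
	 *	};
	 *	ioctl(fd, MEMREADOOB, &ob);		// ob.start now holds retlen
	 *
	 * The 64-bit variants below return the length in the "length" field
	 * instead.
	 */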

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
					       (void __user *)(uintptr_t)buf.usr_ptr,
					       &buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
					      (void __user *)(uintptr_t)buf.usr_ptr,
					      &buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
			(struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* Legacy interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) case MEMGETOOBSEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct nand_oobinfo oi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (!master->ooblayout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ret = get_oobinfo(mtd, &oi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) case MEMGETBADBLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) loff_t offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (copy_from_user(&offs, argp, sizeof(loff_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return -EFAULT;
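/* mtd_block_isbad() returns 0, 1 or a negative error; pass it straight back as the ioctl result */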
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return mtd_block_isbad(mtd, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) case MEMSETBADBLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) loff_t offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (copy_from_user(&offs, argp, sizeof(loff_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return mtd_block_markbad(mtd, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) case OTPSELECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) int mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (copy_from_user(&mode, argp, sizeof(int)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) mfi->mode = MTD_FILE_MODE_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ret = otp_select_filemode(mfi, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) file->f_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) case OTPGETREGIONCOUNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) case OTPGETREGIONINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) {
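/*
 * Query the factory or user OTP region descriptors into a 4 KiB scratch
 * buffer, then copy either the region count or the raw descriptor array
 * back to userspace.
 */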
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) size_t retlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) switch (mfi->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) case MTD_FILE_MODE_OTP_FACTORY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) case MTD_FILE_MODE_OTP_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (cmd == OTPGETREGIONCOUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) int nbr = retlen / sizeof(struct otp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ret = copy_to_user(argp, &nbr, sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ret = copy_to_user(argp, buf, retlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) case OTPLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct otp_info oinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (mfi->mode != MTD_FILE_MODE_OTP_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* This ioctl is being deprecated - it truncates the ECC layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) case ECCGETLAYOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct nand_ecclayout_user *usrlay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (!master->ooblayout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (!usrlay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) shrink_ecclayout(mtd, usrlay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) kfree(usrlay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case ECCGETSTATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (copy_to_user(argp, &mtd->ecc_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) sizeof(struct mtd_ecc_stats)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) case MTDFILEMODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mfi->mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) switch (arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) case MTD_FILE_MODE_OTP_FACTORY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) case MTD_FILE_MODE_OTP_USER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ret = otp_select_filemode(mfi, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) case MTD_FILE_MODE_RAW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!mtd_has_oob(mtd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) mfi->mode = arg;
fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) case MTD_FILE_MODE_NORMAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) file->f_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case BLKPG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct blkpg_ioctl_arg __user *blk_arg = argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct blkpg_ioctl_arg a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (copy_from_user(&a, blk_arg, sizeof(a)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ret = mtdchar_blkpg_ioctl(mtd, &a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case BLKRRPART:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* No re-read partition feature. Just return OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) } /* mtdchar_ioctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
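/* Main ioctl entry point: every command is serialized through mtd_mutex */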
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) mutex_lock(&mtd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ret = mtdchar_ioctl(file, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) mutex_unlock(&mtd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
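/* 32-bit userspace layout of struct mtd_oob_buf, used by the compat ioctls below */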
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct mtd_oob_buf32 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) u_int32_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) u_int32_t length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) compat_caddr_t ptr; /* unsigned char* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) #define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) #define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
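/*
 * Compat ioctl entry point: translate 32-bit OOB buffers and BLKPG arguments
 * into their native forms and hand everything else to mtdchar_ioctl() with a
 * compat_ptr()-converted argument.
 */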
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct mtd_file_info *mfi = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct mtd_info *mtd = mfi->mtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) void __user *argp = compat_ptr(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) mutex_lock(&mtd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) case MEMWRITEOOB32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) struct mtd_oob_buf32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct mtd_oob_buf32 __user *buf_user = argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (!(file->f_mode & FMODE_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (copy_from_user(&buf, argp, sizeof(buf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ret = mtdchar_writeoob(file, mtd, buf.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) buf.length, compat_ptr(buf.ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) &buf_user->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) case MEMREADOOB32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct mtd_oob_buf32 buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct mtd_oob_buf32 __user *buf_user = argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* NOTE: the returned length is written back to the user's buf->start field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (copy_from_user(&buf, argp, sizeof(buf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) ret = mtdchar_readoob(file, mtd, buf.start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) buf.length, compat_ptr(buf.ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) &buf_user->start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) case BLKPG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct blkpg_compat_ioctl_arg __user *uarg = argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct blkpg_compat_ioctl_arg compat_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct blkpg_ioctl_arg a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) memset(&a, 0, sizeof(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) a.op = compat_arg.op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) a.flags = compat_arg.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) a.datalen = compat_arg.datalen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) a.data = compat_ptr(compat_arg.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) ret = mtdchar_blkpg_ioctl(mtd, &a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) mutex_unlock(&mtd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) #endif /* CONFIG_COMPAT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * try to determine where a shared mapping can be made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * - only supported for NOMMU at the moment (an MMU can't, and doesn't, copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * private mappings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static unsigned long mtdchar_get_unmapped_area(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) unsigned long pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct mtd_file_info *mfi = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct mtd_info *mtd = mfi->mtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (addr != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return (unsigned long) -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return (unsigned long) -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) offset = pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (offset > mtd->size - len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return (unsigned long) -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ret = mtd_get_unmapped_area(mtd, len, offset, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return ret == -EOPNOTSUPP ? -ENODEV : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static unsigned mtdchar_mmap_capabilities(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct mtd_file_info *mfi = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return mtd_mmap_capabilities(mfi->mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * set up a mapping for shared memory segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct mtd_file_info *mfi = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct mtd_info *mtd = mfi->mtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct map_info *map = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* This is broken because it assumes the MTD device is map-based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) and that mtd->priv is a valid struct map_info. It should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) replaced with something that uses the mtd_get_unmapped_area()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) operation properly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) #ifdef pgprot_noncached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return vm_iomap_memory(vma, map->phys, map->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
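/* File operations backing the MTD character devices (/dev/mtd*) */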
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static const struct file_operations mtd_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) .llseek = mtdchar_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) .read = mtdchar_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) .write = mtdchar_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) .unlocked_ioctl = mtdchar_unlocked_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) .compat_ioctl = mtdchar_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) .open = mtdchar_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .release = mtdchar_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) .mmap = mtdchar_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) .get_unmapped_area = mtdchar_get_unmapped_area,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) .mmap_capabilities = mtdchar_mmap_capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
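/* Register the "mtd" character device region: major MTD_CHAR_MAJOR, all minors */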
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) int __init init_mtdchar(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) "mtd", &mtd_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) pr_err("Can't allocate major number %d for MTD\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) MTD_CHAR_MAJOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) void __exit cleanup_mtdchar(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);