Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) /* Copyright (c) 2018 Rockchip Electronics Co. Ltd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/blkpg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/blk-mq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/freezer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/hdreg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/semaphore.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/version.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <linux/soc/rockchip/rk_vendor_storage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include "../soc/rockchip/flash_vendor_storage.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include "rkflash_blk.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include "rkflash_debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include "rk_sftl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
/*
 * sftl_printk - printf-style logging hook exported to the SFTL core.
 * @fmt: printk-compatible format string (may carry a KERN_* prefix).
 *
 * Thin varargs wrapper around vprintk() so the flash translation layer
 * can log without calling printk() directly.
 */
void __printf(1, 2) sftl_printk(char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
/* For rkflash block dev private data */
static const struct flash_boot_ops *g_boot_ops;

/* Flash type registered at probe time; -1 means no backend bound yet */
static int g_flash_type = -1;
/* Partition table parsed out of the RK firmware header on sector 0 */
static struct flash_part disk_array[MAX_PART_COUNT];
static int g_max_part_num = 4;
#define FW_HRADER_PT_NAME		("fw_header_p")
/* Synthetic partition exposing the firmware header sectors themselves */
static struct flash_part fw_header_p;

/* Per-partition access flags carried in flash_part.type */
#define PART_READONLY 0x85
#define PART_WRITEONLY 0x86
#define PART_NO_ACCESS 0x87

/* I/O statistics exported via /proc/rkflash (sectors and request counts) */
static unsigned long totle_read_data;
static unsigned long totle_write_data;
static unsigned long totle_read_count;
static unsigned long totle_write_count;

/* Bounce buffer used when a request's pages are not contiguous/low-mem */
static char *mtd_read_temp_buffer;
#define MTD_RW_SECTORS (512)

/* ioctl commands toggling the per-device access-control flags */
#define DISABLE_WRITE _IO('V', 0)
#define ENABLE_WRITE _IO('V', 1)
#define DISABLE_READ _IO('V', 2)
#define ENABLE_READ _IO('V', 3)

/* Thread for gc operation */
static DECLARE_WAIT_QUEUE_HEAD(nand_gc_thread_wait);
/* Flag polled by the GC thread; set after each request, cleared when idle */
static unsigned long nand_gc_do;
static struct task_struct *nand_gc_thread __read_mostly;

/* For rkflash dev private data, including mtd dev and block dev */
static int rkflash_dev_initialised;
/* Serializes every call into g_boot_ops (read/write/discard/gc) */
static DEFINE_MUTEX(g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) static unsigned int rk_partition_init(struct flash_part *part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	int i, part_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	u32 desity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	struct STRUCT_PART_INFO *g_part;  /* size 2KB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	g_part = kmalloc(sizeof(*g_part), GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	if (!g_part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	mutex_lock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	if (g_boot_ops->read(0, 4, g_part) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 		if (g_part->hdr.ui_fw_tag == RK_PARTITION_TAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 			part_num = g_part->hdr.ui_part_entry_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 			desity = g_boot_ops->get_capacity();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 			for (i = 0; i < part_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 				memcpy(part[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 				       g_part->part[i].sz_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 				       32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 				part[i].offset = g_part->part[i].ui_pt_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 				part[i].size = g_part->part[i].ui_pt_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 				part[i].type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 				if (part[i].size == UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 					part[i].size = desity - part[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 				if (part[i].offset + part[i].size > desity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 					part[i].size = desity - part[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	mutex_unlock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	kfree(g_part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	memset(&fw_header_p, 0x0, sizeof(fw_header_p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	memcpy(fw_header_p.name, FW_HRADER_PT_NAME, strlen(FW_HRADER_PT_NAME));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	fw_header_p.offset = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	fw_header_p.size = 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	fw_header_p.type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	return part_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) static int rkflash_blk_proc_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	char *ftl_buf = kzalloc(4096, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #if IS_ENABLED(CONFIG_RK_SFTL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	int real_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	real_size = rknand_proc_ftlread(4096, ftl_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	if (real_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		seq_printf(m, "%s", ftl_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	seq_printf(m, "Totle Read %ld KB\n", totle_read_data >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	seq_printf(m, "Totle Write %ld KB\n", totle_write_data >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	seq_printf(m, "totle_write_count %ld\n", totle_write_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	seq_printf(m, "totle_read_count %ld\n", totle_read_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	kfree(ftl_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
/* proc open handler: bind the seq_file show routine to this inode. */
static int rkflash_blk_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rkflash_blk_proc_show, PDE_DATA(inode));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
/* /proc/rkflash file operations: standard single-shot seq_file plumbing. */
static const struct proc_ops rkflash_blk_proc_fops = {
	.proc_open		= rkflash_blk_proc_open,
	.proc_read		= seq_read,
	.proc_lseek		= seq_lseek,
	.proc_release	= single_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) static int rkflash_blk_create_procfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	struct proc_dir_entry *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	ent = proc_create_data("rkflash", 0x664, NULL, &rkflash_blk_proc_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 			       (void *)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	if (!ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) static int rkflash_blk_discard(u32 sec, u32 n_sec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	if (g_boot_ops->discard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		ret = g_boot_ops->discard(sec, n_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static int rkflash_blk_xfer(struct flash_blk_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 			    unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 			    unsigned long nsector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 			    char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 			    int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	if (dev->disable_access ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	    (cmd == WRITE && dev->readonly) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	    (cmd == READ && dev->writeonly)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	start += dev->off_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	case READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 		totle_read_data += nsector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		totle_read_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		rkflash_print_bio("rkflash r sec= %lx, n_sec= %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 				  start, nsector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		ret = g_boot_ops->read(start, nsector, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 			ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	case WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 		totle_write_data += nsector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 		totle_write_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 		rkflash_print_bio("rkflash w sec= %lx, n_sec= %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 				  start, nsector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 		ret = g_boot_ops->write(start, nsector, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 			ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) static int rkflash_blk_check_buffer_align(struct request *req, char **pbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	int nr_vec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	void *firstbuf = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	char *nextbuffer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 		/* high mem return 0 and using kernel buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 		if (PageHighMem(bv.bv_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		buffer = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		if (!firstbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 			firstbuf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		nr_vec++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		if (nextbuffer && nextbuffer != buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		nextbuffer = buffer + bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	*pbuf = firstbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
/*
 * do_blktrans_all_request - execute one block request synchronously.
 * @tr:  block-ops container (unused here; kept for the call signature)
 * @dev: partition device the request targets
 * @req: the request to service in full (all segments at once)
 *
 * Reads and writes go through rkflash_blk_xfer() on the whole request
 * (__data_len >> 9 sectors).  When the bio pages are contiguous low
 * memory the transfer runs in place; otherwise the shared bounce
 * buffer mtd_read_temp_buffer is used and data is copied segment by
 * segment under kmap_atomic.
 *
 * Called with g_flash_ops_mutex held (see rkflash_blktrans_work).
 * Returns BLK_STS_OK or BLK_STS_IOERR.
 */
static blk_status_t do_blktrans_all_request(struct flash_blk_ops *tr,
			       struct flash_blk_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf = NULL, *page_buf;
	struct req_iterator rq_iter;
	struct bio_vec bvec;
	int ret;
	unsigned long totle_nsect;

	block = blk_rq_pos(req);
	nsect = blk_rq_cur_bytes(req) >> 9;	/* current-segment sectors */
	totle_nsect = (req->__data_len) >> 9;	/* whole-request sectors */

	/* reject requests that run past the end of the disk */
	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		rkflash_print_bio("%s discard\n", __func__);
		if (rkflash_blk_discard(block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		rkflash_print_bio("%s read block=%lx nsec=%lx\n", __func__, block, totle_nsect);
		/* default to the bounce buffer; switch to the bio pages
		 * if they turn out to be one contiguous low-mem span
		 */
		buf = mtd_read_temp_buffer;
		rkflash_blk_check_buffer_align(req, &buf);
		ret = rkflash_blk_xfer(dev,
				       block,
				       totle_nsect,
				       buf,
				       REQ_OP_READ);
		/* bounce path: scatter the data back into the bio pages */
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(page_buf +
				       bvec.bv_offset,
				       p,
				       bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	case REQ_OP_WRITE:
		rkflash_print_bio("%s write block=%lx nsec=%lx\n", __func__, block, totle_nsect);

		buf = mtd_read_temp_buffer;
		rkflash_blk_check_buffer_align(req, &buf);
		/* bounce path: gather the bio pages into the buffer first */
		if (buf == mtd_read_temp_buffer) {
			char *p = buf;

			rq_for_each_segment(bvec, req, rq_iter) {
				page_buf = kmap_atomic(bvec.bv_page);
				memcpy(p,
					page_buf +
					bvec.bv_offset,
					bvec.bv_len);
				p += bvec.bv_len;
				kunmap_atomic(page_buf);
			}
		}
		ret = rkflash_blk_xfer(dev,
					block,
					totle_nsect,
					buf,
					REQ_OP_WRITE);

		if (ret)
			return BLK_STS_IOERR;
		else
			return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) static struct request *rkflash_next_request(struct flash_blk_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	struct flash_blk_ops *tr = dev->blk_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	rq = list_first_entry_or_null(&tr->rq_list, struct request, queuelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	if (rq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 		list_del_init(&rq->queuelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 		blk_mq_start_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 		return rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 
/*
 * rkflash_blktrans_work - drain the private request list.
 *
 * Entered and exited with queue_lock held; the lock is dropped around
 * the actual flash I/O (which sleeps on g_flash_ops_mutex) and retaken
 * before looking at the list again, as annotated by __releases/__acquires.
 *
 * Each request is completed inline via blk_update_request(); only when
 * the whole request has been accounted for is __blk_mq_end_request()
 * called.
 */
static void rkflash_blktrans_work(struct flash_blk_dev *dev)
	__releases(&dev->blk_ops->queue_lock)
	__acquires(&dev->blk_ops->queue_lock)
{
	struct flash_blk_ops *tr = dev->blk_ops;
	struct request *req = NULL;

	while (1) {
		blk_status_t res;

		req = rkflash_next_request(dev);
		if (!req)
			break;

		/* drop the spinlock: the transfer sleeps on a mutex */
		spin_unlock_irq(&dev->blk_ops->queue_lock);

		mutex_lock(&g_flash_ops_mutex);
		res = do_blktrans_all_request(tr, dev, req);
		mutex_unlock(&g_flash_ops_mutex);

		/* whole request serviced in one pass: finish it */
		if (!blk_update_request(req, res, req->__data_len)) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		spin_lock_irq(&dev->blk_ops->queue_lock);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 
/*
 * rkflash_queue_rq - blk-mq .queue_rq handler.
 *
 * Queues the request on the driver's private list and services the
 * whole list synchronously under queue_lock, then pokes the GC thread.
 *
 * NOTE(review): nand_gc_do is written here without any lock or atomic
 * (cleared before the transfer, set after) while the GC thread reads
 * it concurrently — presumably acceptable as a lossy hint, but worth
 * confirming; an atomic flag would make the intent explicit.
 */
static blk_status_t rkflash_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct flash_blk_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		/* no device bound: start then fail the request */
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	/* hold off background GC while foreground I/O is in flight */
	nand_gc_do = 0;
	spin_lock_irq(&dev->blk_ops->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->blk_ops->rq_list);
	rkflash_blktrans_work(dev);
	spin_unlock_irq(&dev->blk_ops->queue_lock);

	/* wake up gc thread */
	nand_gc_do = 1;
	wake_up(&nand_gc_thread_wait);

	return BLK_STS_OK;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 
/* blk-mq dispatch table: all I/O funnels through rkflash_queue_rq(). */
static const struct blk_mq_ops rkflash_mq_ops = {
	.queue_rq	= rkflash_queue_rq,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) static int nand_gc_has_work(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	return nand_gc_do;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) static int nand_gc_do_work(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	int ret = nand_gc_has_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	/* do garbage collect at idle state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 		mutex_lock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 		ret = g_boot_ops->gc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 		rkflash_print_bio("%s gc result= %d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 		mutex_unlock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) static void nand_gc_wait_work(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	unsigned long nand_gc_jiffies = HZ / 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	if (nand_gc_has_work())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		wait_event_freezable_timeout(nand_gc_thread_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 					     kthread_should_stop(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 					     nand_gc_jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 		wait_event_freezable(nand_gc_thread_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 				     kthread_should_stop() || nand_gc_has_work());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static int nand_gc_mythread(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	int gc_done_times = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	set_freezable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		if (nand_gc_do_work() == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 			gc_done_times++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 			if (gc_done_times > 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 				nand_gc_do = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 			gc_done_times = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 		nand_gc_wait_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	pr_info("nand gc quited\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
/* block_device_operations.open: no per-open state, always succeeds. */
static int rkflash_blk_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static void rkflash_blk_release(struct gendisk *disk, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) static int rkflash_blk_ioctl(struct block_device *bdev, fmode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 			 unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 			 unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	struct flash_blk_dev *dev = bdev->bd_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	case ENABLE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 		dev->disable_access = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		dev->readonly = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 		set_disk_ro(dev->blkcore_priv, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	case DISABLE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 		dev->readonly = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 		set_disk_ro(dev->blkcore_priv, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	case ENABLE_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 		dev->disable_access = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 		dev->writeonly = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	case DISABLE_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		dev->writeonly = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) const struct block_device_operations rkflash_blk_trans_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	.open = rkflash_blk_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	.release = rkflash_blk_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	.ioctl = rkflash_blk_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) static struct flash_blk_ops mytr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	.name =  "rkflash",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	.major = 31,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	.minorbits = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static int rkflash_blk_add_dev(struct flash_blk_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 			       struct flash_blk_ops *blk_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 			       struct flash_part *part)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	struct gendisk *gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	if (part->size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	gd = alloc_disk(1 << blk_ops->minorbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	if (!gd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	dev->blk_ops = blk_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	dev->size = part->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	dev->off_size = part->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	dev->devnum = blk_ops->last_dev_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	list_add_tail(&dev->list, &blk_ops->devs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	blk_ops->last_dev_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	gd->major = blk_ops->major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	gd->first_minor = (dev->devnum) << blk_ops->minorbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	gd->fops = &rkflash_blk_trans_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	if (part->name[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		snprintf(gd->disk_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 			 sizeof(gd->disk_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 			 "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 			 part->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		gd->flags = GENHD_FL_EXT_DEVT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		gd->minors = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		snprintf(gd->disk_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 			 sizeof(gd->disk_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 			 "%s%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 			 blk_ops->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 			 dev->devnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	set_capacity(gd, dev->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	gd->private_data = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	dev->blkcore_priv = gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	gd->queue = blk_ops->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	if (part->type == PART_NO_ACCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		dev->disable_access = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	if (part->type == PART_READONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		dev->readonly = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	if (part->type == PART_WRITEONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		dev->writeonly = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	if (dev->readonly)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		set_disk_ro(gd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	add_disk(gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) static int rkflash_blk_remove_dev(struct flash_blk_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	struct gendisk *gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	gd = dev->blkcore_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	list_del(&dev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	gd->queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	del_gendisk(gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	put_disk(gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static int rkflash_blk_register(struct flash_blk_ops *blk_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	u64 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	struct flash_blk_dev *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 				       GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	ret = register_blkdev(blk_ops->major, blk_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 		kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	/* Create the request queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	spin_lock_init(&blk_ops->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	INIT_LIST_HEAD(&blk_ops->rq_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	blk_ops->tag_set = kzalloc(sizeof(*blk_ops->tag_set), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	if (!blk_ops->tag_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	blk_ops->rq = blk_mq_init_sq_queue(blk_ops->tag_set, &rkflash_mq_ops, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 					   BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	if (IS_ERR(blk_ops->rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 		ret = PTR_ERR(blk_ops->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 		blk_ops->rq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 		goto error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	blk_ops->rq->queuedata = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	blk_queue_max_hw_sectors(blk_ops->rq, MTD_RW_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	blk_queue_max_segments(blk_ops->rq, MTD_RW_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	blk_queue_flag_set(QUEUE_FLAG_DISCARD, blk_ops->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	blk_queue_max_discard_sectors(blk_ops->rq, UINT_MAX >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	blk_ops->rq->limits.discard_granularity = 64 << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 		nand_gc_thread = kthread_run(nand_gc_mythread, (void *)blk_ops, "rkflash_gc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	INIT_LIST_HEAD(&blk_ops->devs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	g_max_part_num = rk_partition_init(disk_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	if (g_max_part_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		/* partition 0 is save vendor data, need hidden */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		blk_ops->last_dev_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 		for (i = 1; i < g_max_part_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 			offset = (u64)disk_array[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 			pr_info("%10s: 0x%09llx -- 0x%09llx (%llu MB)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 				disk_array[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 				offset * 512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 				(u64)(offset + disk_array[i].size) * 512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 				(u64)disk_array[i].size / 2048);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 			rkflash_blk_add_dev(dev, blk_ops, &disk_array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 		rkflash_blk_add_dev(dev, blk_ops, &fw_header_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 		struct flash_part part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		part.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 		part.size = g_boot_ops->get_capacity();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 		part.type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		part.name[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 		rkflash_blk_add_dev(dev, blk_ops, &part);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	rkflash_blk_create_procfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) error2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	kfree(blk_ops->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) error1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	unregister_blkdev(blk_ops->major, blk_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) static void rkflash_blk_unregister(struct flash_blk_ops *blk_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	struct list_head *this, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	list_for_each_safe(this, next, &blk_ops->devs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 		struct flash_blk_dev *dev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 			list_entry(this, struct flash_blk_dev, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 		rkflash_blk_remove_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	blk_cleanup_queue(blk_ops->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	unregister_blkdev(blk_ops->major, blk_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) static int __maybe_unused rkflash_dev_vendor_read(u32 sec, u32 n_sec, void *p_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	if (g_boot_ops->vendor_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 		mutex_lock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		ret = g_boot_ops->vendor_read(sec, n_sec, p_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		mutex_unlock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) static int __maybe_unused rkflash_dev_vendor_write(u32 sec, u32 n_sec, void *p_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	if (g_boot_ops->vendor_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 		mutex_lock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 		ret = g_boot_ops->vendor_write(sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 					       n_sec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 					       p_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 		mutex_unlock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) int rkflash_dev_init(void __iomem *reg_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 		     enum flash_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 		     const struct flash_boot_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	pr_err("%s enter\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	if (rkflash_dev_initialised) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 		pr_err("rkflash has already inited as id[%d]\n", g_flash_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	if (!ops->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	ret = ops->init(reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 		pr_err("rkflash[%d] is invalid", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	pr_info("rkflash[%d] init success\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	g_boot_ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	/* vendor part */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	case FLASH_TYPE_SFC_NOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) #if IS_ENABLED(CONFIG_RK_SFC_NOR_MTD) && IS_ENABLED(CONFIG_ROCKCHIP_MTD_VENDOR_STORAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 		flash_vendor_dev_ops_register(rkflash_dev_vendor_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 					      rkflash_dev_vendor_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	case FLASH_TYPE_SFC_NAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) #ifdef CONFIG_RK_SFC_NAND_MTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	case FLASH_TYPE_NANDC_NAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) #if defined(CONFIG_RK_SFTL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		rk_sftl_vendor_dev_ops_register(rkflash_dev_vendor_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 						rkflash_dev_vendor_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 		ret = rk_sftl_vendor_storage_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 		if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 			rk_vendor_register(rk_sftl_vendor_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 					   rk_sftl_vendor_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 			rk_sftl_vendor_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 			pr_info("rkflashd vendor storage init ok !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 			pr_info("rkflash vendor storage init failed !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	case FLASH_TYPE_SFC_NOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) #ifdef CONFIG_RK_SFC_NOR_MTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		ret = sfc_nor_mtd_init(sfnor_dev, &g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		pr_err("%s device register as mtd dev, ret= %d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	case FLASH_TYPE_SFC_NAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) #ifdef CONFIG_RK_SFC_NAND_MTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 		ret = sfc_nand_mtd_init(sfnand_dev, &g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		pr_err("%s device register as mtd dev, ret= %d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	case FLASH_TYPE_NANDC_NAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		g_flash_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 		ret = rkflash_blk_register(&mytr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 		pr_err("%s device register as blk dev, ret= %d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 			g_flash_type = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 		rkflash_dev_initialised = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) int rkflash_dev_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	if (rkflash_dev_initialised)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 		rkflash_dev_initialised = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	if (g_flash_type != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 		rkflash_blk_unregister(&mytr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	pr_info("%s:OK\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int rkflash_dev_suspend(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	mutex_lock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) int rkflash_dev_resume(void __iomem *reg_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	g_boot_ops->resume(reg_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 	mutex_unlock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) void rkflash_dev_shutdown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	pr_info("rkflash_shutdown...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	if (g_flash_type == FLASH_TYPE_SFC_NAND || g_flash_type == FLASH_TYPE_NANDC_NAND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 		kthread_stop(nand_gc_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	mutex_lock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	g_boot_ops->deinit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 	mutex_unlock(&g_flash_ops_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	pr_info("rkflash_shutdown:OK\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }