Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* vmu-flash.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Driver for SEGA Dreamcast Visual Memory Unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (c) Adrian McMenamin 2002 - 2009
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (c) Paul Mundt 2001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/maple.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/mtd/mtd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/mtd/map.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) struct vmu_cache {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 	unsigned char *buffer;		/* Cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 	unsigned int block;		/* Which block was cached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	unsigned long jiffies_atc;	/* When was it cached? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 	int valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) struct mdev_part {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	int partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) struct vmupart {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	u16 user_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	u16 root_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	u16 numblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	struct vmu_cache *pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) struct memcard {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	u16 tempA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	u16 tempB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	u32 partitions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	u32 blocklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	u32 writecnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	u32 readcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	u32 removable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	int partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	int read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	unsigned char *blockread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	struct vmupart *parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	struct mtd_info *mtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) struct vmu_block {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	unsigned int num; /* block number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	unsigned int ofs; /* block offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) static struct vmu_block *ofs_to_block(unsigned long src_ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	struct mtd_info *mtd, int partition)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	struct vmu_block *vblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	int num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	mpart = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	mdev = mpart->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	if (src_ofs >= card->parts[partition].numblocks * card->blocklen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	num = src_ofs / card->blocklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	if (num > card->parts[partition].numblocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	vblock = kmalloc(sizeof(struct vmu_block), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	if (!vblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	vblock->num = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	vblock->ofs = src_ofs % card->blocklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	return vblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) /* Maple bus callback function for reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) static void vmu_blockread(struct mapleq *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	mdev = mq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	/* copy the read in data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	if (unlikely(!card->blockread))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	memcpy(card->blockread, mq->recvbuf->buf + 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		card->blocklen/card->readcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) /* Interface with maple bus to read blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  * caching the results so that other parts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  * of the driver can access block reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static int maple_vmu_read_block(unsigned int num, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	struct mtd_info *mtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	int partition, error = 0, x, wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	unsigned char *blockread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	struct vmu_cache *pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	__be32 sendbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	mpart = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	mdev = mpart->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	partition = mpart->partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	pcache = card->parts[partition].pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	pcache->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	/* prepare the cache for this block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	if (!pcache->buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		pcache->buffer = kmalloc(card->blocklen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 		if (!pcache->buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 				" to lack of memory\n", mdev->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 				mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 			error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 			goto outB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	* Reads may be phased - again the hardware spec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	* supports this - though may not be any devices in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	* the wild that implement it, but we will here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	for (x = 0; x < card->readcnt; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		sendbuf = cpu_to_be32(partition << 24 | x << 16 | num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		if (atomic_read(&mdev->busy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 			wait_event_interruptible_timeout(mdev->maple_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 				atomic_read(&mdev->busy) == 0, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 			if (atomic_read(&mdev->busy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 				dev_notice(&mdev->dev, "VMU at (%d, %d)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 					" is busy\n", mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 				error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 				goto outB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		atomic_set(&mdev->busy, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 		blockread = kmalloc(card->blocklen/card->readcnt, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 		if (!blockread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 			error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 			atomic_set(&mdev->busy, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 			goto outB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 		card->blockread = blockread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		maple_getcond_callback(mdev, vmu_blockread, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 			MAPLE_FUNC_MEMCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 				MAPLE_COMMAND_BREAD, 2, &sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		/* Very long timeouts seem to be needed when box is stressed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		wait = wait_event_interruptible_timeout(mdev->maple_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 			(atomic_read(&mdev->busy) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 			atomic_read(&mdev->busy) == 2), HZ * 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 		* MTD layer does not handle hotplugging well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 		* so have to return errors when VMU is unplugged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		* in the middle of a read (busy == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		if (error || atomic_read(&mdev->busy) == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 			if (atomic_read(&mdev->busy) == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 				error = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 			atomic_set(&mdev->busy, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 			card->blockread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 			goto outA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 		if (wait == 0 || wait == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 			card->blockread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 			atomic_set(&mdev->busy, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 			error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 			list_del_init(&(mdev->mq->list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 			kfree(mdev->mq->sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 			mdev->mq->sendbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			if (wait == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 					" interrupted on block 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 					mdev->port, mdev->unit, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 					" timed out on block 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 					mdev->port, mdev->unit, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 			goto outA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		memcpy(buf + (card->blocklen/card->readcnt) * x, blockread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 			card->blocklen/card->readcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 		memcpy(pcache->buffer + (card->blocklen/card->readcnt) * x,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 			card->blockread, card->blocklen/card->readcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 		card->blockread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 		pcache->block = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		pcache->jiffies_atc = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		pcache->valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 		kfree(blockread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) outA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	kfree(blockread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) outB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) /* communicate with maple bus for phased writing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) static int maple_vmu_write_block(unsigned int num, const unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	struct mtd_info *mtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	int partition, error, locking, x, phaselen, wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	__be32 *sendbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	mpart = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	mdev = mpart->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	partition = mpart->partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	phaselen = card->blocklen/card->writecnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	sendbuf = kmalloc(phaselen + 4, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	if (!sendbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		goto fail_nosendbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	for (x = 0; x < card->writecnt; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 		sendbuf[0] = cpu_to_be32(partition << 24 | x << 16 | num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		memcpy(&sendbuf[1], buf + phaselen * x, phaselen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 		/* wait until the device is not busy doing something else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		* or 1 second - which ever is longer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 		if (atomic_read(&mdev->busy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 			wait_event_interruptible_timeout(mdev->maple_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 				atomic_read(&mdev->busy) == 0, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 			if (atomic_read(&mdev->busy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 				error = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 					"failed - device is busy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 					mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 				goto fail_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 		atomic_set(&mdev->busy, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 			MAPLE_COMMAND_BWRITE, phaselen / 4 + 2, sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 		wait = wait_event_interruptible_timeout(mdev->maple_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 			atomic_read(&mdev->busy) == 0, HZ/10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		if (locking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 			error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 			atomic_set(&mdev->busy, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 			goto fail_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 		if (atomic_read(&mdev->busy) == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 			atomic_set(&mdev->busy, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 		} else if (wait == 0 || wait == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 			error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 				" 0x%X at phase %d failed: could not"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 				" communicate with VMU", mdev->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 				mdev->unit, num, x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 			atomic_set(&mdev->busy, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 			kfree(mdev->mq->sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 			mdev->mq->sendbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 			list_del_init(&(mdev->mq->list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 			goto fail_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	kfree(sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	return card->blocklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) fail_nolock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	kfree(sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) fail_nosendbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) /* mtd function to simulate reading byte by byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) static unsigned char vmu_flash_read_char(unsigned long ofs, int *retval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	struct mtd_info *mtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	struct vmu_block *vblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	unsigned char *buf, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	int partition, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	mpart = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	mdev = mpart->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	partition = mpart->partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	*retval =  0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	buf = kmalloc(card->blocklen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 		*retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 		goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	vblock = ofs_to_block(ofs, mtd, partition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	if (!vblock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 		*retval = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		goto out_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	error = maple_vmu_read_block(vblock->num, buf, mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		ret = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 		*retval = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		goto out_vblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	ret = buf[vblock->ofs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) out_vblock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	kfree(vblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) out_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) /* mtd higher order function to read flash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	size_t *retlen,  u_char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	struct vmu_cache *pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	struct vmu_block *vblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	int index = 0, retval, partition, leftover, numblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	unsigned char cx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	mpart = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 	mdev = mpart->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	partition = mpart->partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	numblocks = card->parts[partition].numblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	if (from + len > numblocks * card->blocklen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		len = numblocks * card->blocklen - from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	/* Have we cached this bit already? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	pcache = card->parts[partition].pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 		vblock =  ofs_to_block(from + index, mtd, partition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 		if (!vblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		/* Have we cached this and is the cache valid and timely? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 		if (pcache->valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 			time_before(jiffies, pcache->jiffies_atc + HZ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 			(pcache->block == vblock->num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 			/* we have cached it, so do necessary copying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 			leftover = card->blocklen - vblock->ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 			if (vblock->ofs + len - index < card->blocklen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 				/* only a bit of this block to copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 				memcpy(buf + index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 					pcache->buffer + vblock->ofs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 					len - index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 				index = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 				/* otherwise copy remainder of whole block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 				memcpy(buf + index, pcache->buffer +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 					vblock->ofs, leftover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 				index += leftover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 			* Not cached so read one byte -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 			* but cache the rest of the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 			*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 			cx = vmu_flash_read_char(from + index, &retval, mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 			if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 				*retlen = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 				kfree(vblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 				return cx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 			memset(buf + index, cx, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 			index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 		kfree(vblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	} while (len > index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	*retlen = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	size_t *retlen, const u_char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	int index = 0, partition, error = 0, numblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	struct vmu_cache *pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	struct vmu_block *vblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	unsigned char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	mpart = mtd->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	mdev = mpart->mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	partition = mpart->partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	numblocks = card->parts[partition].numblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	if (to + len > numblocks * card->blocklen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 		len = numblocks * card->blocklen - to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	if (len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 		error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	vblock = ofs_to_block(to, mtd, partition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	if (!vblock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	buffer = kmalloc(card->blocklen, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 		goto fail_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		/* Read in the block we are to write to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		error = maple_vmu_read_block(vblock->num, buffer, mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 			goto fail_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 			buffer[vblock->ofs] = buf[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 			vblock->ofs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 			index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 			if (index >= len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		} while (vblock->ofs < card->blocklen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 		/* write out new buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		error = maple_vmu_write_block(vblock->num, buffer, mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		/* invalidate the cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		pcache = card->parts[partition].pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		pcache->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 		if (error != card->blocklen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 			goto fail_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 		vblock->num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		vblock->ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	} while (len > index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	*retlen = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	kfree(vblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) fail_io:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) fail_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	kfree(vblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) static void vmu_flash_sync(struct mtd_info *mtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	/* Do nothing here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /* Maple bus callback function to recursively query hardware details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static void vmu_queryblocks(struct mapleq *mq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	struct maple_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	unsigned short *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	__be32 partnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	struct vmu_cache *pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	struct mtd_info *mtd_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	struct vmupart *part_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	mdev = mq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	res = (unsigned short *) (mq->recvbuf->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	card->tempA = res[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	card->tempB = res[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 		"blocks with a root block at %d\n", card->partition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		card->tempA, card->tempB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	part_cur = &card->parts[card->partition];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	part_cur->user_blocks = card->tempA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	part_cur->root_block = card->tempB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	part_cur->numblocks = card->tempB + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	part_cur->name = kmalloc(12, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	if (!part_cur->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		goto fail_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	sprintf(part_cur->name, "vmu%d.%d.%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		mdev->port, mdev->unit, card->partition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	mtd_cur = &card->mtd[card->partition];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	mtd_cur->name = part_cur->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	mtd_cur->type = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	mtd_cur->size = part_cur->numblocks * card->blocklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	mtd_cur->erasesize = card->blocklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	mtd_cur->_write = vmu_flash_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	mtd_cur->_read = vmu_flash_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	mtd_cur->_sync = vmu_flash_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	mtd_cur->writesize = card->blocklen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	if (!mpart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		goto fail_mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	mpart->mdev = mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	mpart->partition = card->partition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	mtd_cur->priv = mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	mtd_cur->owner = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	pcache = kzalloc(sizeof(struct vmu_cache), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	if (!pcache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 		goto fail_cache_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	part_cur->pcache = pcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	error = mtd_device_register(mtd_cur, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 		goto fail_mtd_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	maple_getcond_callback(mdev, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		MAPLE_FUNC_MEMCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	* Set up a recursive call to the (probably theoretical)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	* second or more partition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	if (++card->partition < card->partitions) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		partnum = cpu_to_be32(card->partition << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 		maple_getcond_callback(mdev, vmu_queryblocks, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 			MAPLE_FUNC_MEMCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 			MAPLE_COMMAND_GETMINFO, 2, &partnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) fail_mtd_register:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		"error is 0x%X\n", mdev->port, mdev->unit, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	for (error = 0; error <= card->partition; error++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		kfree(((card->parts)[error]).pcache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		((card->parts)[error]).pcache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) fail_cache_create:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) fail_mpart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	for (error = 0; error <= card->partition; error++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		kfree(((card->mtd)[error]).priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		((card->mtd)[error]).priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	maple_getcond_callback(mdev, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 		MAPLE_FUNC_MEMCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	kfree(part_cur->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) fail_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /* Handles very basic info about the flash, queries for details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) static int vmu_connect(struct maple_device *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	unsigned long test_flash_data, basic_flash_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	int c, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	u32 partnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	test_flash_data = be32_to_cpu(mdev->devinfo.function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	/* Need to count how many bits are set - to find out which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	 * function_data element has details of the memory card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	c = hweight_long(test_flash_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	if (!card) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		goto fail_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	card->partitions = (basic_flash_data >> 24 & 0xFF) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	card->writecnt = basic_flash_data >> 12 & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	card->readcnt = basic_flash_data >> 8 & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	card->removable = basic_flash_data >> 7 & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	card->partition = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	* Not sure there are actually any multi-partition devices in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	* real world, but the hardware supports them, so, so will we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	card->parts = kmalloc_array(card->partitions, sizeof(struct vmupart),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 				    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	if (!card->parts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 		goto fail_partitions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	card->mtd = kmalloc_array(card->partitions, sizeof(struct mtd_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 				  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	if (!card->mtd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 		goto fail_mtd_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	maple_set_drvdata(mdev, card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	* We want to trap meminfo not get cond
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	* so set interval to zero, but rely on maple bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	* driver to pass back the results of the meminfo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	maple_getcond_callback(mdev, vmu_queryblocks, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 		MAPLE_FUNC_MEMCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	/* Make sure we are clear to go */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	if (atomic_read(&mdev->busy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 		wait_event_interruptible_timeout(mdev->maple_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 			atomic_read(&mdev->busy) == 0, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 		if (atomic_read(&mdev->busy) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 			dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 				mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 			error = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 			goto fail_device_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	atomic_set(&mdev->busy, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	* Set up the minfo call: vmu_queryblocks will handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	* the information passed back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 		MAPLE_COMMAND_GETMINFO, 2, &partnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 			" error is 0x%X\n", mdev->port, mdev->unit, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		goto fail_mtd_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) fail_device_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	kfree(card->mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) fail_mtd_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	kfree(card->parts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) fail_partitions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	kfree(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) fail_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) static void vmu_disconnect(struct maple_device *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	struct mdev_part *mpart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	int x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	mdev->callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	for (x = 0; x < card->partitions; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		mpart = ((card->mtd)[x]).priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 		mpart->mdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		mtd_device_unregister(&((card->mtd)[x]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 		kfree(((card->parts)[x]).name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	kfree(card->parts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	kfree(card->mtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	kfree(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /* Callback to handle eccentricities of both mtd subsystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)  * and general flakyness of Dreamcast VMUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static int vmu_can_unload(struct maple_device *mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	struct memcard *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	int x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	struct mtd_info *mtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	card = maple_get_drvdata(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	for (x = 0; x < card->partitions; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 		mtd = &((card->mtd)[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 		if (mtd->usecount > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) #define ERRSTR "VMU at (%d, %d) file error -"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	enum maple_file_errors error = ((int *)recvbuf)[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	case MAPLE_FILEERR_INVALID_PARTITION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 			mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	case MAPLE_FILEERR_PHASE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 		dev_notice(&mdev->dev, ERRSTR " phase error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 			mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	case MAPLE_FILEERR_INVALID_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 			mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	case MAPLE_FILEERR_WRITE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 		dev_notice(&mdev->dev, ERRSTR " write error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 			mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	case MAPLE_FILEERR_INVALID_WRITE_LENGTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 			mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	case MAPLE_FILEERR_BAD_CRC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 			mdev->port, mdev->unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 			mdev->port, mdev->unit, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static int probe_maple_vmu(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	struct maple_device *mdev = to_maple_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	struct maple_driver *mdrv = to_maple_driver(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	mdev->can_unload = vmu_can_unload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	mdev->fileerr_handler = vmu_file_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	mdev->driver = mdrv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	return vmu_connect(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static int remove_maple_vmu(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	struct maple_device *mdev = to_maple_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	vmu_disconnect(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static struct maple_driver vmu_flash_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	.function =	MAPLE_FUNC_MEMCARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	.drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		.name =		"Dreamcast_visual_memory",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 		.probe =	probe_maple_vmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 		.remove =	remove_maple_vmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static int __init vmu_flash_map_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	return maple_driver_register(&vmu_flash_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static void __exit vmu_flash_map_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	maple_driver_unregister(&vmu_flash_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) module_init(vmu_flash_map_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) module_exit(vmu_flash_map_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) MODULE_AUTHOR("Adrian McMenamin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");