/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FWH_LOCK_H
#define FWH_LOCK_H


enum fwh_lock_state {
	FWH_UNLOCKED = 0,
	FWH_DENY_WRITE = 1,
	FWH_IMMUTABLE = 2,
	FWH_DENY_READ = 4,
};

struct fwh_xxlock_thunk {
	enum fwh_lock_state val;
	flstate_t state;
};


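/*
 * Each thunk pairs the value to write into the FWH block-lock register
 * with the flchip state to report while that register write is in
 * progress; fwh_xxlock_oneblock() below consumes both fields.
 */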
#define FWH_XXLOCK_ONEBLOCK_LOCK	((struct fwh_xxlock_thunk){ FWH_DENY_WRITE, FL_LOCKING})
#define FWH_XXLOCK_ONEBLOCK_UNLOCK	((struct fwh_xxlock_thunk){ FWH_UNLOCKED, FL_UNLOCKING})

/*
 * This locking/unlocking is specific to firmware hub parts. Only one
 * such part that supports the Intel command set is known. Firmware
 * hub parts cannot be interleaved as they sit on the LPC bus, so this
 * code has not been tested with interleaved chips and will likely
 * fail in that context.
 */
static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
			       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct fwh_xxlock_thunk *xxlt = (struct fwh_xxlock_thunk *)thunk;
	int ret;

	/* Refuse the operation if we cannot look behind the chip */
	if (chip->start < 0x400000) {
		pr_debug("MTD %s(): chip->start: %lx wanted >= 0x400000\n",
			 __func__, chip->start);
		return -EIO;
	}
	/*
	 * lock block registers:
	 * - on 64k boundaries and
	 * - bit 1 set high
	 * - block lock registers are 4MiB lower - overflow subtract (danger)
	 *
	 * The address manipulation is first done on the logical address,
	 * which is 0 at the start of the chip, and then the offset of
	 * the individual chip is added to it. In any other order, a weird
	 * map offset could cause problems.
	 */
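	/*
	 * Worked example (illustrative numbers only): for a block at
	 * chip-relative address 0x12345 on a chip with chip->start ==
	 * 0x800000, the first statement below yields 0x10002 (64KiB
	 * aligned, bit 1 set) and the second relocates it to map offset
	 * 0x410002, i.e. exactly 4MiB below the block's data address.
	 */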
	adr = (adr & ~0xffffUL) | 0x2;
	adr += chip->start - 0x400000;

	/*
	 * This is easy because these are writes to registers and not writes
	 * to flash memory - that means that we don't have to check status
	 * and timeout.
	 */
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	chip->oldstate = chip->state;
	chip->state = xxlt->state;
	map_write(map, CMD(xxlt->val), adr);

	/* Done and happy. */
	chip->state = chip->oldstate;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return 0;
}


static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

	ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
			       (void *)&FWH_XXLOCK_ONEBLOCK_LOCK);

	return ret;
}


static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

	ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
			       (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);

	return ret;
}

static void fixup_use_fwh_lock(struct mtd_info *mtd)
{
	printk(KERN_NOTICE "using fwh lock/unlock method\n");
	/* Setup for the chips with the fwh lock method */
	mtd->_lock = fwh_lock_varsize;
	mtd->_unlock = fwh_unlock_varsize;
}
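/*
 * Usage sketch (illustrative, not part of this header): the CFI/JEDEC
 * command-set driver that includes this file typically installs the
 * fixup via a struct cfi_fixup table keyed on manufacturer and device
 * ID, roughly along these lines:
 *
 *	static struct cfi_fixup jedec_fixup_table[] = {
 *		{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
 *		{ 0, 0, NULL }
 *	};
 *	cfi_fixup(mtd, jedec_fixup_table);
 *
 * The exact table entries live in the including driver
 * (e.g. cfi_cmdset_0001.c), not here.
 */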
#endif /* FWH_LOCK_H */