// SPDX-License-Identifier: GPL-2.0
/*
 * Bad block management
 *
 * - Heavily based on MD badblocks code from Neil Brown
 *
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/badblocks.h>
#include <linux/seqlock.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/slab.h>

/**
 * badblocks_check() - check a given range for bad sectors
 * @bb: the badblocks structure that holds all badblock information
 * @s: sector (start) at which to check for badblocks
 * @sectors: number of sectors to check for badblocks
 * @first_bad: pointer to store location of the first badblock
 * @bad_sectors: pointer to store number of badblocks after @first_bad
 *
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 * A 'shift' can be set so that larger blocks are tracked and
 * consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
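 *
 * As an illustrative example (not taken from this file): with shift == 0,
 * an acknowledged bad range of 8 sectors starting at sector 4096 would be
 * stored roughly as (4096 << 9) | (8 - 1) with the most significant
 * 'acknowledged' bit set; the BB_OFFSET(), BB_LEN() and BB_ACK() helpers
 * used below recover the three fields from such an entry.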
 *
 * Locking of the bad-block table uses a seqlock so badblocks_check
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 *
 * Return:
 *  0: there are no known bad blocks in the range
 *  1: there are known bad blocks which are all acknowledged
 * -1: there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
			sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not.
			 */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_check);
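
/*
 * Typical use of badblocks_check() by a caller, shown as a sketch only
 * ('bb', 'sector' and 'nr_sectors' stand for whatever the calling driver
 * holds and are not names defined in this file):
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *	int rv = badblocks_check(bb, sector, nr_sectors,
 *				 &first_bad, &bad_sectors);
 *
 *	rv == 0:  no known bad blocks in the range
 *	rv == 1:  bad blocks overlap the range, but all are acknowledged
 *	rv == -1: at least one overlapping bad block is unacknowledged
 *
 * For rv of 1 or -1, first_bad/bad_sectors describe the first overlapping
 * bad range.
 */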

static void badblocks_update_acked(struct badblocks *bb)
{
	u64 *p = bb->page;
	int i;
	bool unacked = false;

	if (!bb->unacked_exist)
		return;

	for (i = 0; i < bb->count ; i++) {
		if (!BB_ACK(p[i])) {
			unacked = true;
			break;
		}
	}

	if (!unacked)
		bb->unacked_exist = 0;
}

/**
 * badblocks_set() - Add a range of bad blocks to the table.
 * @bb: the badblocks structure that holds all badblock information
 * @s: first sector to mark as bad
 * @sectors: number of sectors to mark as bad
 * @acknowledged: whether to mark the bad sectors as acknowledged
 *
 * This might extend the table, or might contract it if two adjacent ranges
 * can be merged. We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 *
 * Return:
 * 0: success
 * 1: failed to set badblocks (out of space)
 */
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
			int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 0;
	unsigned long flags;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 1;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;

		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);

		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range
		 */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);

		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);

		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);

			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (or could not merge all of it).
		 * Need to add a range just before 'hi'.
		 */
		if (bb->count >= MAX_BADBLOCKS) {
			/* No room for more */
			rv = 1;
			break;
		} else {
			int this_sectors = sectors;

			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	else
		badblocks_update_acked(bb);
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_set);
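
/*
 * Sketch of how a driver might record a freshly detected media error
 * (illustrative only; 'bb', 'bad_sector' and 'nr_bad' are hypothetical
 * caller-side names, not part of this file):
 *
 *	if (badblocks_set(bb, bad_sector, nr_bad, 0))
 *		pr_warn("badblocks table full, error not recorded\n");
 *
 * Passing acknowledged == 0 leaves the new range unacknowledged until the
 * caller has persisted it (see ack_all_badblocks() below).
 */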

/**
 * badblocks_clear() - Remove a range of bad blocks from the table.
 * @bb: the badblocks structure that holds all badblock information
 * @s: first sector to clear
 * @sectors: number of sectors to clear
 *
 * This may involve extending the table if we split a region,
 * but it must not fail. So if the table becomes full, we just
 * drop the remove request.
 *
 * Return:
 * 0: success
 * -ENOSPC: failed to clear badblocks (no room to split an existing range)
 */
int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
		    (BB_OFFSET(p[lo]) < target)) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MAX_BADBLOCKS) {
					rv = -ENOSPC;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
		       (BB_OFFSET(p[lo]) < target)) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);

				p[lo] = BB_MAKE(start, s - start, ack);
				/* now p[lo] doesn't overlap, so we can stop */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	badblocks_update_acked(bb);
	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_clear);
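
/*
 * Sketch of a typical clear, e.g. after a caller has successfully
 * rewritten a previously bad region (illustrative; 'bb', 'sector' and
 * 'nr_sectors' are hypothetical caller-side names):
 *
 *	if (badblocks_clear(bb, sector, nr_sectors))
 *		pr_warn("could not clear badblocks: table full\n");
 */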

/**
 * ack_all_badblocks() - Acknowledge all bad blocks in a list.
 * @bb: the badblocks structure that holds all badblock information
 *
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates
 */
void ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;

		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);

				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(ack_all_badblocks);
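
/*
 * Rough outline of how a metadata-update path might use this (a sketch of
 * one possible caller, not code from this file; 'bb' is hypothetical):
 *
 *	// ... persist the current table to on-disk metadata ...
 *	bb->changed = 0;
 *	ack_all_badblocks(bb);
 *
 * i.e. ->changed is cleared only once the table has been written out, and
 * only then are the recorded bad blocks marked acknowledged.
 */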

/**
 * badblocks_show() - sysfs access to bad-blocks list
 * @bb: the badblocks structure that holds all badblock information
 * @page: buffer received from sysfs
 * @unack: whether to show only unacknowledged badblocks
 *
 * Return:
 * Length of returned data
 */
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);

		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
EXPORT_SYMBOL_GPL(badblocks_show);
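
/*
 * Illustrative sysfs wiring (a sketch only; 'foo_dev', 'to_foo_dev()' and
 * the attribute itself are hypothetical and not defined here):
 *
 *	static ssize_t bad_blocks_show(struct device *dev,
 *				       struct device_attribute *attr,
 *				       char *page)
 *	{
 *		struct foo_dev *foo = to_foo_dev(dev);
 *
 *		return badblocks_show(&foo->bb, page, 0);
 *	}
 *
 * Each output line is "<start sector> <length>\n", already scaled back up
 * by ->shift.
 */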

/**
 * badblocks_store() - sysfs access to bad-blocks list
 * @bb: the badblocks structure that holds all badblock information
 * @page: buffer received from sysfs
 * @len: length of data received from sysfs
 * @unack: whether to store the badblocks as unacknowledged
 *
 * Return:
 * Length of the buffer processed or -ve error.
 */
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
			int unack)
{
	unsigned long long sector;
	int length;
	char newline;

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		fallthrough;
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (badblocks_set(bb, sector, length, !unack))
		return -ENOSPC;
	else
		return len;
}
EXPORT_SYMBOL_GPL(badblocks_store);
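
/*
 * The input format parsed above is "<start sector> <length>", with an
 * optional trailing newline; e.g. (illustrative shell usage, the sysfs
 * path is hypothetical):
 *
 *	echo "4096 8" > /sys/.../bad_blocks
 *
 * would mark the 8 sectors starting at sector 4096 as bad, acknowledged
 * when @unack is 0 and unacknowledged otherwise.
 */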

static int __badblocks_init(struct device *dev, struct badblocks *bb,
		int enable)
{
	bb->dev = dev;
	bb->count = 0;
	if (enable)
		bb->shift = 0;
	else
		bb->shift = -1;
	if (dev)
		bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
	else
		bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bb->page) {
		bb->shift = -1;
		return -ENOMEM;
	}
	seqlock_init(&bb->lock);

	return 0;
}

/**
 * badblocks_init() - initialize the badblocks structure
 * @bb: the badblocks structure that holds all badblock information
 * @enable: whether to enable badblocks accounting
 *
 * Return:
 * 0: success
 * -ve errno: on error
 */
int badblocks_init(struct badblocks *bb, int enable)
{
	return __badblocks_init(NULL, bb, enable);
}
EXPORT_SYMBOL_GPL(badblocks_init);

int devm_init_badblocks(struct device *dev, struct badblocks *bb)
{
	if (!bb)
		return -EINVAL;
	return __badblocks_init(dev, bb, 1);
}
EXPORT_SYMBOL_GPL(devm_init_badblocks);
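
/*
 * Minimal initialization sketch for a device-managed user (illustrative;
 * 'foo' and its ->bb member are hypothetical, 'dev' is the probing
 * struct device):
 *
 *	rc = devm_init_badblocks(dev, &foo->bb);
 *	if (rc)
 *		return rc;
 *
 * The page backing the table is then freed automatically with the device;
 * non-devm users call badblocks_init() and pair it with badblocks_exit().
 */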

/**
 * badblocks_exit() - free the badblocks structure
 * @bb: the badblocks structure that holds all badblock information
 */
void badblocks_exit(struct badblocks *bb)
{
	if (!bb)
		return;
	if (bb->dev)
		devm_kfree(bb->dev, bb->page);
	else
		kfree(bb->page);
	bb->page = NULL;
}
EXPORT_SYMBOL_GPL(badblocks_exit);