// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include "fat.h"

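/*
 * Per-variant accessors for FAT table entries.  FAT12, FAT16 and FAT32
 * each supply an implementation (fat12_ops, fat16_ops, fat32_ops below).
 */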
struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);

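/*
 * FAT12 packs two 12-bit entries into three bytes, so the byte offset
 * of an entry is entry + entry/2.
 */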
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = entry + (entry >> 1);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

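/*
 * FAT16/FAT32 entries have a fixed power-of-two size, so the byte
 * offset is simply entry << fatent_shift.
 */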
static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

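/*
 * A 12-bit entry may straddle a block boundary, so keep two byte
 * pointers which may point into different buffer heads.
 */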
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

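/*
 * Read the block holding a FAT12 entry; an entry that starts on the
 * last byte of a block also needs the following block.
 */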
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry is on a block boundary; it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}

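/*
 * Read the single block holding a FAT16/FAT32 entry (these entries
 * never cross a block boundary).
 */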
static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			(llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}

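/*
 * Decode a 12-bit entry: an odd-numbered entry lives in the high nibble
 * of its first byte plus the whole second byte, an even-numbered entry
 * in the whole first byte plus the low nibble of the second byte.
 */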
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);
	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}

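/*
 * Encode a 12-bit entry, preserving the nibble that belongs to the
 * neighbouring entry, and dirty the buffer(s) that were modified.
 */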
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

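/*
 * Advance to the next 12-bit entry.  Returns 1 while the new entry is
 * still covered by the block(s) already read (releasing bhs[0] when
 * crossing into bhs[1]), 0 when a new block must be read.
 */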
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}

static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

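/* Select the entry accessors and entry-size shift for the FAT variant. */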
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (is_fat32(sbi)) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (is_fat16(sbi)) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (is_fat12(sbi)) {
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
	} else {
		fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
	}
}

static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || !is_fat32(sbi))
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}

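/*
 * Check whether the block(s) fatent already holds still cover this
 * entry; returns 1 and updates the entry pointer if so, 0 if the
 * caller must (re)read the block(s).
 */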
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do this fatent's blocks include this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

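/*
 * Read the FAT entry for @entry and return its value: the next cluster
 * number, FAT_ENT_FREE, FAT_ENT_EOF, or a negative error code.
 */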
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}

/* FIXME: We could write the blocks in one bigger chunk. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid race with userspace read via bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}

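/*
 * Store @new in the entry, optionally wait for it to hit the disk, and
 * mirror the change to the backup FAT(s).
 */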
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}

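/*
 * Collect fatent's buffer heads into the @bhs array (taking an extra
 * reference), skipping any that were already collected.
 */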
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}

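/*
 * Allocate @nr_cluster free clusters, chain them together (the last one
 * marked EOF), and return their numbers in @cluster[].  The scan starts
 * just after the last known free cluster (sbi->prev_free).
 */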
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() took a reference on the
				 * bhs, so prev_ent can still be used.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}

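/*
 * Free the whole cluster chain starting at @cluster, optionally issuing
 * discard for the freed sectors.
 */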
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);

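/*
 * Readahead state for a sequential scan over the FAT: [cur, limit) is
 * the block range of the scan, the ra_* fields track the current
 * readahead window.
 */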
struct fatent_ra {
	sector_t cur;
	sector_t limit;

	unsigned int ra_blocks;
	sector_t ra_advance;
	sector_t ra_next;
	sector_t ra_limit;
};

static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
			struct fat_entry *fatent, int ent_limit)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	sector_t blocknr, block_end;
	int offset;
	/*
	 * This is a sequential read, so read ahead ra_pages * 2 (but try
	 * to align it to the optimal hardware IO size).
	 * [BTW, 128kb covers all of the FAT sectors for FAT12 and FAT16]
	 */
	unsigned long ra_pages = sb->s_bdi->ra_pages;
	unsigned int reada_blocks;

	if (fatent->entry >= ent_limit)
		return;

	if (ra_pages > sb->s_bdi->io_pages)
		ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
	reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);

	/* Initialize the range for sequential read */
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
	ra->cur = 0;
	ra->limit = (block_end + 1) - blocknr;

	/* Advance the window in steps of half the readahead size */
	ra->ra_blocks = reada_blocks >> 1;
	ra->ra_advance = ra->cur;
	ra->ra_next = ra->cur;
	ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}

/* Assumed to be called before reading a new block (increments ->cur). */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
			  struct fat_entry *fatent)
{
	if (ra->ra_next >= ra->ra_limit)
		return;

	if (ra->cur >= ra->ra_advance) {
		struct msdos_sb_info *sbi = MSDOS_SB(sb);
		const struct fatent_operations *ops = sbi->fatent_ops;
		struct blk_plug plug;
		sector_t blocknr, diff;
		int offset;

		ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

		diff = blocknr - ra->cur;
		blk_start_plug(&plug);
		/*
		 * FIXME: we would want to directly use the bio with
		 * pages to reduce the number of segments.
		 */
		for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
			sb_breadahead(sb, ra->ra_next + diff);
		blk_finish_plug(&plug);

		/* Advance the readahead window */
		ra->ra_advance += ra->ra_blocks;
		ra->ra_limit += min_t(sector_t,
				      ra->ra_blocks, ra->limit - ra->ra_limit);
	}
	ra->cur++;
}

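/*
 * Scan the whole FAT to count the free clusters and cache the result
 * in sbi->free_clusters.
 */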
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}

static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}

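/*
 * Walk the requested range and discard runs of free clusters that are
 * at least @range->minlen clusters long; on return, @range->len holds
 * the number of bytes trimmed.
 */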
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, so trim at the granularity of
	 * a cluster.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used so they are not
	 * trimmed.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle scenario when tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);

	range->len = trimmed << sbi->cluster_bits;

	return err;
}