^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Squashfs - a compressed read only filesystem for Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Phillip Lougher <phillip@squashfs.org.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * file.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * This file contains code for handling regular files. A regular file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * consists of a sequence of contiguous compressed blocks, and/or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * compressed fragment block (tail-end packed block). The compressed size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * of each datablock is stored in a block list contained within the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * file inode (itself stored in one or more compressed metadata blocks).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * To speed up access to datablocks when reading 'large' files (256 Mbytes or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * larger), the code implements an index cache that caches the mapping from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * block index to datablock location on disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * retaining a simple and space-efficient block list on disk. The cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * The index cache is designed to be memory efficient, and by default uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * 16 KiB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/vfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "squashfs_fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "squashfs_fs_sb.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "squashfs_fs_i.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include "squashfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * Locate cache slot in range [offset, index] for specified inode. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * there's more than one return the slot closest to index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static struct meta_index *locate_meta_index(struct inode *inode, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct meta_index *meta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) mutex_lock(&msblk->meta_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) if (msblk->meta_index == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) goto not_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) if (msblk->meta_index[i].inode_number == inode->i_ino &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) msblk->meta_index[i].offset >= offset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) msblk->meta_index[i].offset <= index &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) msblk->meta_index[i].locked == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) TRACE("locate_meta_index: entry %d, offset %d\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) msblk->meta_index[i].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) meta = &msblk->meta_index[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) offset = meta->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) meta->locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) not_allocated:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) mutex_unlock(&msblk->meta_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) return meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * Find and initialise an empty cache slot for index offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) static struct meta_index *empty_meta_index(struct inode *inode, int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) int skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) struct meta_index *meta = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) mutex_lock(&msblk->meta_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) if (msblk->meta_index == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * First time cache index has been used, allocate and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * initialise. The cache index could be allocated at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * mount time but doing it here means it is allocated only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * if a 'large' file is read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) sizeof(*(msblk->meta_index)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (msblk->meta_index == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) ERROR("Failed to allocate meta_index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) msblk->meta_index[i].inode_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) msblk->meta_index[i].locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) msblk->next_meta_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) for (i = SQUASHFS_META_SLOTS; i &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) msblk->meta_index[msblk->next_meta_index].locked; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) msblk->next_meta_index = (msblk->next_meta_index + 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) SQUASHFS_META_SLOTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) TRACE("empty_meta_index: failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) TRACE("empty_meta_index: returned meta entry %d, %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) msblk->next_meta_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) &msblk->meta_index[msblk->next_meta_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) meta = &msblk->meta_index[msblk->next_meta_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) msblk->next_meta_index = (msblk->next_meta_index + 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) SQUASHFS_META_SLOTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) meta->inode_number = inode->i_ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) meta->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) meta->skip = skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) meta->entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) meta->locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) mutex_unlock(&msblk->meta_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) static void release_meta_index(struct inode *inode, struct meta_index *meta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) mutex_lock(&msblk->meta_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) meta->locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) mutex_unlock(&msblk->meta_index_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * Read the next n blocks from the block list, starting from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * metadata block <start_block, offset>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) static long long read_indexes(struct super_block *sb, int n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) u64 *start_block, int *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) long long block = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (blist == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) ERROR("read_indexes: Failed to allocate block_list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) int blocks = min_t(int, n, PAGE_SIZE >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) err = squashfs_read_metadata(sb, blist, start_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) offset, blocks << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) ERROR("read_indexes: reading block [%llx:%x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) *start_block, *offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) for (i = 0; i < blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) int size = squashfs_block_size(blist[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) err = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) n -= blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) kfree(blist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) return block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) kfree(blist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * Each cache index slot has SQUASHFS_META_ENTRIES, each of which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * can cache one index -> datablock/blocklist-block mapping. We wish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * to distribute these over the length of the file, entry[0] maps index x,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * The larger the file, the greater the skip factor. The skip factor is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * the number of metadata blocks that need to be read fits into the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * If the skip factor is limited in this way then the file will use multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * slots.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) static inline int calculate_skip(u64 blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * SQUASHFS_META_INDEXES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
/*
 * Search and grow the index cache for the specified inode, returning the
 * on-disk locations of the datablock and block list metadata block
 * <index_block, index_offset> for index (scaled to nearest cache index).
 *
 * Returns the (scaled-back) block index actually reached, which may be
 * smaller than the requested index (the caller walks the remaining
 * block-list entries itself), or a negative error code.
 */
static int fill_meta_index(struct inode *inode, int index,
		u64 *index_block, int *index_offset, u64 *data_block)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	/* skip factor derived from file size in blocks */
	int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
	int offset = 0;
	struct meta_index *meta;
	struct meta_entry *meta_entry;
	/* start walking from the inode's own block list location */
	u64 cur_index_block = squashfs_i(inode)->block_list_start;
	int cur_offset = squashfs_i(inode)->offset;
	u64 cur_data_block = squashfs_i(inode)->start;
	int err, i;

	/*
	 * Scale index to cache index (cache slot entry)
	 */
	index /= SQUASHFS_META_INDEXES * skip;

	while (offset < index) {
		/* try to reuse an already-cached slot covering (offset, index] */
		meta = locate_meta_index(inode, offset + 1, index);

		if (meta == NULL) {
			/* nothing cached: claim a fresh slot starting here */
			meta = empty_meta_index(inode, offset + 1, skip);
			if (meta == NULL)
				/* no slot available; return what we have */
				goto all_done;
		} else {
			/* jump to the cached entry closest to (not past) index */
			offset = index < meta->offset + meta->entries ? index :
				meta->offset + meta->entries - 1;
			meta_entry = &meta->meta_entry[offset - meta->offset];
			/* index_block is stored relative to the inode table */
			cur_index_block = meta_entry->index_block +
				msblk->inode_table;
			cur_offset = meta_entry->offset;
			cur_data_block = meta_entry->data_block;
			TRACE("get_meta_index: offset %d, meta->offset %d, "
				"meta->entries %d\n", offset, meta->offset,
				meta->entries);
			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
				" data_block 0x%llx\n", cur_index_block,
				cur_offset, cur_data_block);
		}

		/*
		 * If necessary grow cache slot by reading block list.  Cache
		 * slot is extended up to index or to the end of the slot, in
		 * which case further slots will be used.
		 */
		for (i = meta->offset + meta->entries; i <= index &&
				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
			/* each cached entry spans skip * META_INDEXES blocks */
			int blocks = skip * SQUASHFS_META_INDEXES;
			long long res = read_indexes(inode->i_sb, blocks,
					&cur_index_block, &cur_offset);

			if (res < 0) {
				if (meta->entries == 0)
					/*
					 * Don't leave an empty slot on read
					 * error allocated to this inode...
					 */
					meta->inode_number = 0;
				err = res;
				goto failed;
			}

			/* advance past the compressed bytes just summed */
			cur_data_block += res;
			meta_entry = &meta->meta_entry[i - meta->offset];
			meta_entry->index_block = cur_index_block -
				msblk->inode_table;
			meta_entry->offset = cur_offset;
			meta_entry->data_block = cur_data_block;
			meta->entries++;
			offset++;
		}

		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
				meta->offset, meta->entries);

		/* done with this slot (locked by locate/empty_meta_index) */
		release_meta_index(inode, meta);
	}

all_done:
	*index_block = cur_index_block;
	*index_offset = cur_offset;
	*data_block = cur_data_block;

	/*
	 * Scale cache index (cache slot entry) to index
	 */
	return offset * SQUASHFS_META_INDEXES * skip;

failed:
	release_meta_index(inode, meta);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) * Get the on-disk location and compressed size of the datablock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * specified by index. Fill_meta_index() does most of the work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) static int read_blocklist(struct inode *inode, int index, u64 *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) u64 start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) long long blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) __le32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) int res = fill_meta_index(inode, index, &start, &offset, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) " 0x%x, block 0x%llx\n", res, index, start, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) *block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * res contains the index of the mapping returned by fill_meta_index(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * this will likely be less than the desired index (because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) * meta_index cache works at a higher granularity). Read any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * extra block indexes needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (res < index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) blks = read_indexes(inode->i_sb, index - res, &start, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) if (blks < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) return (int) blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) *block += blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * Read length of block specified by index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) sizeof(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return squashfs_block_size(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) int copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) void *pageaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) pageaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) memset(pageaddr + copied, 0, PAGE_SIZE - copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) kunmap_atomic(pageaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) if (copied == avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) /* Copy data into page cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) int bytes, int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) int start_index = page->index & ~mask, end_index = start_index | mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) * Loop copying datablock into pages. As the datablock likely covers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * grab the pages from the page cache, except for the page that we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * been called to fill.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) for (i = start_index; i <= end_index && bytes > 0; i++,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct page *push_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) push_page = (i == page->index) ? page :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) grab_cache_page_nowait(page->mapping, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (!push_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (PageUptodate(push_page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) goto skip_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) squashfs_fill_page(push_page, buffer, offset, avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) skip_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) unlock_page(push_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) if (i != page->index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) put_page(push_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) /* Read datablock stored packed inside a fragment (tail-end packed block) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) static int squashfs_readpage_fragment(struct page *page, int expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) squashfs_i(inode)->fragment_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) squashfs_i(inode)->fragment_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) int res = buffer->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) ERROR("Unable to read page, block %llx, size %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) squashfs_i(inode)->fragment_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) squashfs_i(inode)->fragment_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) squashfs_copy_cache(page, buffer, expected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) squashfs_i(inode)->fragment_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) squashfs_cache_put(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static int squashfs_readpage_sparse(struct page *page, int expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) squashfs_copy_cache(page, NULL, expected, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
/*
 * Address-space readpage entry point: decompress the datablock (or
 * fragment) containing page and populate the page cache with it.
 * Always returns 0; failures are reported by setting the page error
 * flag.  The page arrives locked and is unlocked before returning
 * (either here or inside the copy helpers).
 */
static int squashfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	/* datablock index containing this PAGE_SIZE page */
	int index = page->index >> (msblk->block_log - PAGE_SHIFT);
	/* index of the (possibly partial) last block of the file */
	int file_end = i_size_read(inode) >> msblk->block_log;
	/* bytes expected in this block: full block, or the file tail */
	int expected = index == file_end ?
			(i_size_read(inode) & (msblk->block_size - 1)) :
			 msblk->block_size;
	int res;
	void *pageaddr;

	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
				page->index, squashfs_i(inode)->start);

	/* page lies entirely beyond EOF: return a zeroed page */
	if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
					PAGE_SHIFT))
		goto out;

	/*
	 * Regular datablock unless this is the tail block and the file
	 * ends in a fragment (fragment_block != SQUASHFS_INVALID_BLK).
	 */
	if (index < file_end || squashfs_i(inode)->fragment_block ==
					SQUASHFS_INVALID_BLK) {
		u64 block = 0;
		/* look up on-disk location and encoded size of the block */
		int bsize = read_blocklist(inode, index, &block);
		if (bsize < 0)
			goto error_out;

		if (bsize == 0)
			/* zero-size entry denotes a sparse block */
			res = squashfs_readpage_sparse(page, expected);
		else
			res = squashfs_readpage_block(page, block, bsize, expected);
	} else
		res = squashfs_readpage_fragment(page, expected);

	/* on success the helper has filled and unlocked the page */
	if (!res)
		return 0;

error_out:
	SetPageError(page);
out:
	/* error or beyond-EOF: hand back a zero-filled page */
	pageaddr = kmap_atomic(page);
	memset(pageaddr, 0, PAGE_SIZE);
	kunmap_atomic(pageaddr);
	flush_dcache_page(page);
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
/* Address-space operations for squashfs regular files (read-only). */
const struct address_space_operations squashfs_aops = {
	.readpage = squashfs_readpage
};