Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags   |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Squashfs - a compressed read only filesystem for Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Phillip Lougher <phillip@squashfs.org.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * cache.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * recently accessed data Squashfs uses two small metadata and fragment caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * This file implements a generic cache implementation used for both caches,
 * plus functions layered on top of the generic cache implementation to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * access the metadata and fragment caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  * To avoid out of memory and fragmentation issues with vmalloc the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * uses sequences of kmalloced PAGE_SIZE buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * It should be noted that the cache is not used for file datablocks, these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * are decompressed and cached in the page-cache in the normal way.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * directory) or fragment access.  Because metadata and fragments are packed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * together into blocks (to gain greater compression) the read of a particular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * piece of metadata or fragment will retrieve other metadata/fragments which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * have been packed with it, these because of locality-of-reference may be read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * in the near future. Temporarily caching them ensures they are available for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * near future access without requiring an additional read and decompress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include <linux/vfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #include "squashfs_fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #include "squashfs_fs_sb.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #include "squashfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #include "page_actor.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/*
 * Look-up block in cache, and increment usage count.  If not in cache, read
 * and decompress it from disk.
 *
 * Returns the cache entry with its refcount held; the caller must release
 * it with squashfs_cache_put().  On read failure the entry is still
 * returned, with entry->error set to the negative errno.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
	struct squashfs_cache *cache, u64 block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		/*
		 * Scan for the block starting at curr_blk, the position of
		 * the previous successful look-up, as consecutive look-ups
		 * tend to hit the same or a neighbouring entry.
		 */
		for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
			if (cache->entry[i].block == block) {
				cache->curr_blk = i;
				break;
			}
			i = (i + 1) % cache->entries;
		}

		if (n == cache->entries) {
			/*
			 * Block not in cache, if all cache entries are used
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->num_waiters++;
				spin_unlock(&cache->lock);
				/* woken by squashfs_cache_put() when an
				 * entry's refcount drops to zero */
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->num_waiters--;
				/* retry the look-up: the block may have been
				 * cached by another process meanwhile */
				continue;
			}

			/*
			 * At least one unused cache entry.  A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				if (cache->entry[i].refcount == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise chosen cache entry, and fill it in from
			 * disk.  pending is set so concurrent look-ups of the
			 * same block sleep until the read completes.
			 */
			cache->unused--;
			entry->block = block;
			entry->refcount = 1;
			entry->pending = 1;
			entry->num_waiters = 0;
			entry->error = 0;
			/* drop the spinlock across the sleeping disk read */
			spin_unlock(&cache->lock);

			entry->length = squashfs_read_data(sb, block, length,
				&entry->next_index, entry->actor);

			spin_lock(&cache->lock);

			/* a negative length is the errno from the read */
			if (entry->length < 0)
				entry->error = entry->length;

			entry->pending = 0;

			/*
			 * While filling this entry one or more other processes
			 * have looked it up in the cache, and have slept
			 * waiting for it to become available.
			 */
			if (entry->num_waiters) {
				spin_unlock(&cache->lock);
				wake_up_all(&entry->wait_queue);
			} else
				spin_unlock(&cache->lock);

			goto out;
		}

		/*
		 * Block already in cache.  Increment refcount so it doesn't
		 * get reused until we're finished with it, if it was
		 * previously unused there's one less cache entry available
		 * for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->refcount == 0)
			cache->unused--;
		entry->refcount++;

		/*
		 * If the entry is currently being filled in by another process
		 * go to sleep waiting for it to become available.
		 */
		if (entry->pending) {
			entry->num_waiters++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
		} else
			spin_unlock(&cache->lock);

		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
		cache->name, i, entry->block, entry->refcount, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
							block);
	return entry;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
/*
 * Release cache entry, once usage count is zero it can be reused.
 *
 * Pairs with squashfs_cache_get(), which took the reference.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
	struct squashfs_cache *cache = entry->cache;

	spin_lock(&cache->lock);
	entry->refcount--;
	if (entry->refcount == 0) {
		/* entry is now eligible for round-robin eviction */
		cache->unused++;
		/*
		 * If there's any processes waiting for a block to become
		 * available, wake one up.
		 */
		if (cache->num_waiters) {
			/* drop the lock before waking to avoid the woken
			 * process immediately blocking on it */
			spin_unlock(&cache->lock);
			wake_up(&cache->wait_queue);
			return;
		}
	}
	spin_unlock(&cache->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)  * Delete cache reclaiming all kmalloced buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) void squashfs_cache_delete(struct squashfs_cache *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	if (cache == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	for (i = 0; i < cache->entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		if (cache->entry[i].data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 			for (j = 0; j < cache->pages; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 				kfree(cache->entry[i].data[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 			kfree(cache->entry[i].data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 		kfree(cache->entry[i].actor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	kfree(cache->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	kfree(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)  * Initialise cache allocating the specified number of entries, each of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)  * size block_size.  To avoid vmalloc fragmentation issues each entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)  * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) struct squashfs_cache *squashfs_cache_init(char *name, int entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	int block_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	if (cache == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		ERROR("Failed to allocate %s cache\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	if (cache->entry == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		ERROR("Failed to allocate %s cache\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	cache->curr_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	cache->next_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	cache->unused = entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	cache->entries = entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	cache->block_size = block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	cache->pages = block_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	cache->pages = cache->pages ? cache->pages : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	cache->name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	cache->num_waiters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	spin_lock_init(&cache->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	init_waitqueue_head(&cache->wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	for (i = 0; i < entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 		struct squashfs_cache_entry *entry = &cache->entry[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 		init_waitqueue_head(&cache->entry[i].wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		entry->cache = cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		entry->block = SQUASHFS_INVALID_BLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		if (entry->data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 			ERROR("Failed to allocate %s cache entry\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		for (j = 0; j < cache->pages; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 			entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 			if (entry->data[j] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 				ERROR("Failed to allocate %s buffer\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 				goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 		entry->actor = squashfs_page_actor_init(entry->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 						cache->pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 		if (entry->actor == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 			ERROR("Failed to allocate %s cache entry\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	return cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	squashfs_cache_delete(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  * Copy up to length bytes from cache entry to buffer starting at offset bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  * into the cache entry.  If there's not length bytes then copy the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)  * bytes available.  In all cases return the number of bytes copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 		int offset, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	int remaining = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	if (length == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	else if (buffer == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 		return min(length, entry->length - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	while (offset < entry->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		void *buff = entry->data[offset / PAGE_SIZE]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 				+ (offset % PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 		int bytes = min_t(int, entry->length - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 				PAGE_SIZE - (offset % PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 		if (bytes >= remaining) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 			memcpy(buffer, buff, remaining);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 			remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 		memcpy(buffer, buff, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		buffer += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 		remaining -= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 		offset += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	return length - remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed).  Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 *
 * On success *block and *offset are advanced past the data read and the
 * requested length is returned.  A NULL buffer may be passed to skip over
 * metadata without copying it.  Returns a negative errno on failure.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
		u64 *block, int *offset, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int bytes, res = length;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

	/* a corrupted filesystem could yield a negative length */
	if (unlikely(length < 0))
		return -EIO;

	while (length) {
		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
		if (entry->error) {
			res = entry->error;
			goto error;
		} else if (*offset >= entry->length) {
			/* offset points outside the decompressed block */
			res = -EIO;
			goto error;
		}

		bytes = squashfs_copy_data(buffer, entry, *offset, length);
		if (buffer)
			buffer += bytes;
		length -= bytes;
		*offset += bytes;

		/* block exhausted; continue in the next metadata block */
		if (*offset == entry->length) {
			*block = entry->next_index;
			*offset = 0;
		}

		squashfs_cache_put(entry);
	}

	return res;

error:
	/* drop the reference taken by squashfs_cache_get() above */
	squashfs_cache_put(entry);
	return res;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)  * Look-up in the fragmment cache the fragment located at <start_block> in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)  * filesystem.  If necessary read and decompress it from disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 				u64 start_block, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	struct squashfs_sb_info *msblk = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 		length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)  * Read and decompress the datablock located at <start_block> in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)  * filesystem.  The cache is used here to avoid duplicating locking and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)  * read/decompress code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 				u64 start_block, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	struct squashfs_sb_info *msblk = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	return squashfs_cache_get(sb, msblk->read_page, start_block, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)  * Read a filesystem table (uncompressed sequence of bytes) from disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) void *squashfs_read_table(struct super_block *sb, u64 block, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 	int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	int i, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	void *table, *buffer, **data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	struct squashfs_page_actor *actor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	table = buffer = kmalloc(length, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	if (table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	if (data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 		res = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	actor = squashfs_page_actor_init(data, pages, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	if (actor == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 		res = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 		goto failed2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 		data[i] = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	res = squashfs_read_data(sb, block, length |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	kfree(actor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	return table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) failed2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	kfree(table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	return ERR_PTR(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }