Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * compress.c - NTFS kernel compressed attributes handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *		Part of the Linux-NTFS project.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (c) 2001-2004 Anton Altaparmakov
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (c) 2002 Richard Russon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/buffer_head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include "attrib.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include "inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include "debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include "ntfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * ntfs_compression_constants - enum of constants used in the compression code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) typedef enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	/* Token types and access mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	NTFS_SYMBOL_TOKEN	=	0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	NTFS_PHRASE_TOKEN	=	1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	NTFS_TOKEN_MASK		=	1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	/* Compression sub-block constants. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	NTFS_SB_SIZE_MASK	=	0x0fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	NTFS_SB_SIZE		=	0x1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	NTFS_SB_IS_COMPRESSED	=	0x8000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	 * The maximum compression block size is by definition 16 * the cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	 * size, with the maximum supported cluster size being 4kiB. Thus the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	 * maximum compression buffer size is 64kiB, so we use this when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	 * initializing the compression buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	NTFS_MAX_CB_SIZE	= 64 * 1024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) } ntfs_compression_constants;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45)  * ntfs_compression_buffer - one buffer for the decompression engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) static u8 *ntfs_compression_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50)  * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) static DEFINE_SPINLOCK(ntfs_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * allocate_compression_buffers - allocate the decompression buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * Caller has to hold the ntfs_lock mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * Return 0 on success or -ENOMEM if the allocations failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) int allocate_compression_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	BUG_ON(ntfs_compression_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	if (!ntfs_compression_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  * free_compression_buffers - free the decompression buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74)  * Caller has to hold the ntfs_lock mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) void free_compression_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	BUG_ON(!ntfs_compression_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	vfree(ntfs_compression_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	ntfs_compression_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84)  * zero_partial_compressed_page - zero out of bounds compressed page region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) static void zero_partial_compressed_page(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		const s64 initialized_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	u8 *kp = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	unsigned int kp_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	ntfs_debug("Zeroing page region outside initialized size.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 		clear_page(kp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	kp_ofs = initialized_size & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)  * handle_bounds_compressed_page - test for&handle out of bounds compressed page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static inline void handle_bounds_compressed_page(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		const loff_t i_size, const s64 initialized_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 			(initialized_size < i_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		zero_partial_compressed_page(page, initialized_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)  * ntfs_decompress - decompress a compression block into an array of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)  * @dest_pages:		destination array of pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)  * @completed_pages:	scratch space to track completed pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)  * @dest_index:		current index into @dest_pages (IN/OUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)  * @dest_ofs:		current offset within @dest_pages[@dest_index] (IN/OUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)  * @dest_max_index:	maximum index into @dest_pages (IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)  * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)  * @xpage:		the target page (-1 if none) (IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)  * @xpage_done:		set to 1 if xpage was completed successfully (IN/OUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)  * @cb_start:		compression block to decompress (IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)  * @cb_size:		size of compression block @cb_start in bytes (IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)  * @i_size:		file size when we started the read (IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)  * @initialized_size:	initialized file size when we started the read (IN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)  * The caller must have disabled preemption. ntfs_decompress() reenables it when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)  * the critical section is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  * This decompresses the compression block @cb_start into the array of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)  * destination pages @dest_pages starting at index @dest_index into @dest_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)  * and at offset @dest_ofs into the page @dest_pages[@dest_index].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  * If xpage is -1 or @xpage has not been completed, @xpage_done is not modified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)  * @cb_start is a pointer to the compression block which needs decompressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  * and @cb_size is the size of @cb_start in bytes (8-64kiB).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)  * Return 0 if success or -EOVERFLOW on error in the compressed stream.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)  * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)  * completed during the decompression of the compression block (@cb_start).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)  * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)  * unpredictably! You have been warned!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)  * Note to hackers: This function may not sleep until it has finished accessing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)  * the compression block @cb_start as it is a per-CPU buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		int *dest_index, int *dest_ofs, const int dest_max_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		const int dest_max_ofs, const int xpage, char *xpage_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 		u8 *const cb_start, const u32 cb_size, const loff_t i_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 		const s64 initialized_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	 * Pointers into the compressed data, i.e. the compression block (cb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	 * and the therein contained sub-blocks (sb).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	u8 *cb_end = cb_start + cb_size; /* End of cb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	u8 *cb = cb_start;	/* Current position in cb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	/* Variables for uncompressed data / destination. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	struct page *dp;	/* Current destination page being worked on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	u8 *dp_addr;		/* Current pointer into dp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 				   NTFS_SB_SIZE). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 				   NTFS_SB_SIZE). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	/* Variables for tag and token parsing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	u8 tag;			/* Current tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	int token;		/* Loop counter for the eight tokens in tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	int nr_completed_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	/* Default error code. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	int err = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) do_next_sb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 			cb - cb_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	 * Have we reached the end of the compression block or the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	 * decompressed data?  The latter can happen for example if the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	 * position in the compression block is one byte before its end so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	 * first two checks do not detect it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 			(*dest_index == dest_max_index &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 			*dest_ofs == dest_max_ofs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		ntfs_debug("Completed. Returning success (0).");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) return_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		/* We can sleep from now on, so we drop lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 		spin_unlock(&ntfs_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		/* Second stage: finalize completed pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 		if (nr_completed_pages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 			for (i = 0; i < nr_completed_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 				int di = completed_pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 				dp = dest_pages[di];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 				 * If we are outside the initialized size, zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 				 * the out of bounds page range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 				handle_bounds_compressed_page(dp, i_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 						initialized_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 				flush_dcache_page(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 				kunmap(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 				SetPageUptodate(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 				unlock_page(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 				if (di == xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 					*xpage_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 					put_page(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 				dest_pages[di] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	/* Setup offsets for the current sub-block destination. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	do_sb_start = *dest_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	do_sb_end = do_sb_start + NTFS_SB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	/* Check that we are still within allowed boundaries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	/* Does the minimum size of a compressed sb overflow valid range? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	if (cb + 6 > cb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 		goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	/* Setup the current sub-block source pointers and validate range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	cb_sb_start = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 			+ 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	if (cb_sb_end > cb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 		goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	/* Get the current destination page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	dp = dest_pages[*dest_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	if (!dp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 		/* No page present. Skip decompression of this sub-block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		cb = cb_sb_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		/* Advance destination position to next sub-block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		if (!*dest_ofs && (++*dest_index > dest_max_index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 			goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		goto do_next_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	/* We have a valid destination page. Setup the destination pointers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	dp_addr = (u8*)page_address(dp) + do_sb_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	/* Now, we are ready to process the current sub-block (sb). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 		ntfs_debug("Found uncompressed sub-block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		/* This sb is not compressed, just copy it into destination. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 		/* Advance source position to first data byte. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 		cb += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 		/* An uncompressed sb must be full size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 		if (cb_sb_end - cb != NTFS_SB_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 			goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 		/* Copy the block and advance the source position. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 		memcpy(dp_addr, cb, NTFS_SB_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 		cb += NTFS_SB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 		/* Advance destination position to next sub-block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 		*dest_ofs += NTFS_SB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 		if (!(*dest_ofs &= ~PAGE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) finalize_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 			 * First stage: add current page index to array of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 			 * completed pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 			completed_pages[nr_completed_pages++] = *dest_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 			if (++*dest_index > dest_max_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 				goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 		goto do_next_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	ntfs_debug("Found compressed sub-block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	/* This sb is compressed, decompress it into destination. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	/* Setup destination pointers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	dp_sb_start = dp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	/* Forward to the first tag in the sub-block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	cb += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) do_next_tag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	if (cb == cb_sb_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 		/* Check if the decompressed sub-block was not full-length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 		if (dp_addr < dp_sb_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 			int nr_bytes = do_sb_end - *dest_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 			ntfs_debug("Filling incomplete sub-block with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 					"zeroes.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 			/* Zero remainder and update destination position. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 			memset(dp_addr, 0, nr_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 			*dest_ofs += nr_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 		/* We have finished the current sub-block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 		if (!(*dest_ofs &= ~PAGE_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 			goto finalize_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 		goto do_next_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	/* Check we are still in range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	if (cb > cb_sb_end || dp_addr > dp_sb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 		goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	/* Get the next tag and advance to first token. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	tag = *cb++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	/* Parse the eight tokens described by the tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	for (token = 0; token < 8; token++, tag >>= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 		u16 lg, pt, length, max_non_overlap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 		register u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		u8 *dp_back_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		/* Check if we are done / still in range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 		/* Determine token type and parse appropriately.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 			 * We have a symbol token, copy the symbol across, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 			 * advance the source and destination positions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 			*dp_addr++ = *cb++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 			++*dest_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 			/* Continue with the next token. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 		 * We have a phrase token. Make sure it is not the first tag in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 		 * the sb as this is illegal and would confuse the code below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 		if (dp_addr == dp_sb_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 			goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		 * Determine the number of bytes to go back (p) and the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		 * of bytes to copy (l). We use an optimized algorithm in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 		 * we first calculate log2(current destination position in sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 		 * which allows determination of l and p in O(1) rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 		 * O(n). We just need an arch-optimized log2() function now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 		lg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 			lg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 		/* Get the phrase token into i. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 		pt = le16_to_cpup((le16*)cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 		 * Calculate starting position of the byte sequence in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 		 * the destination using the fact that p = (pt >> (12 - lg)) + 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		 * and make sure we don't go too far back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 		if (dp_back_addr < dp_sb_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 			goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 		/* Now calculate the length of the byte sequence. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 		length = (pt & (0xfff >> lg)) + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 		/* Advance destination position and verify it is in range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 		*dest_ofs += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		if (*dest_ofs > do_sb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 			goto return_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 		/* The number of non-overlapping bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		max_non_overlap = dp_addr - dp_back_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 		if (length <= max_non_overlap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 			/* The byte sequence doesn't overlap, just copy it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 			memcpy(dp_addr, dp_back_addr, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 			/* Advance destination pointer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 			dp_addr += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 			 * The byte sequence does overlap, copy non-overlapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 			 * part and then do a slow byte by byte copy for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 			 * overlapping part. Also, advance the destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 			 * pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 			memcpy(dp_addr, dp_back_addr, max_non_overlap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 			dp_addr += max_non_overlap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 			dp_back_addr += max_non_overlap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 			length -= max_non_overlap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 			while (length--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 				*dp_addr++ = *dp_back_addr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 		/* Advance source position and continue with the next token. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 		cb += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	/* No tokens left in the current tag. Continue with the next tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	goto do_next_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) return_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	goto return_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)  * ntfs_read_compressed_block - read a compressed block into the page cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)  * @page:	locked page in the compression block(s) we need to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)  * When we are called the page has already been verified to be locked and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)  * attribute is known to be non-resident, not encrypted, but compressed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)  * 1. Determine which compression block(s) @page is in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)  * 2. Get hold of all pages corresponding to this/these compression block(s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)  * 3. Read the (first) compression block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)  * 4. Decompress it into the corresponding pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)  * 5. Throw the compressed data away and proceed to 3. for the next compression
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)  *    block or return success if no more compression blocks left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)  * Warning: We have to be careful what we do about existing pages. They might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)  * have been written to so that we would lose data if we were to just overwrite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)  * them with the out-of-date uncompressed data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)  * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)  * the end of the file I think. We need to detect this case and zero the out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)  * of bounds remainder of the page in question and mark it as handled. At the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)  * moment we would just return -EIO on such a page. This bug will only become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)  * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)  * clusters so is probably not going to be seen by anyone. Still this should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)  * be fixed. (AIA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)  * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)  * handling sparse and compressed cbs. (AIA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)  * FIXME: At the moment we don't do any zeroing out in the case that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)  * initialized_size is less than data_size. This should be safe because of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)  * nature of the compression algorithm used. Just in case we check and output
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)  * an error message in read inode if the two sizes are not equal for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)  * compressed file. (AIA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) int ntfs_read_compressed_block(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	loff_t i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	s64 initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	struct address_space *mapping = page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	ntfs_inode *ni = NTFS_I(mapping->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	ntfs_volume *vol = ni->vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	struct super_block *sb = vol->sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	runlist_element *rl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	unsigned long flags, block_size = sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	unsigned char block_size_bits = sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	u8 *cb, *cb_pos, *cb_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	struct buffer_head **bhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	unsigned long offset, index = page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	u32 cb_size = ni->itype.compressed.block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	u64 cb_size_mask = cb_size - 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	VCN vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	LCN lcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 			vol->cluster_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	 * The first vcn after the last wanted vcn (minimum alignment is again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	 * PAGE_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 			& ~cb_size_mask) >> vol->cluster_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	/* Number of compression blocks (cbs) in the wanted vcn range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 			>> ni->itype.compressed.block_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	 * Number of pages required to store the uncompressed data from all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	 * compression blocks (cbs) overlapping @page. Due to alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	 * guarantees of start_vcn and end_vcn, no need to round up here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	unsigned int nr_pages = (end_vcn - start_vcn) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 			vol->cluster_size_bits >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	unsigned int xpage, max_page, cur_page, cur_ofs, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	unsigned int cb_clusters, cb_max_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	int *completed_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	unsigned char xpage_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 			"%i.", index, cb_size, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	 * Bad things happen if we get here for anything that is not an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	 * unnamed $DATA attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	BUG_ON(ni->type != AT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	BUG_ON(ni->name_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	completed_pages = kmalloc_array(nr_pages + 1, sizeof(int), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	/* Allocate memory to store the buffer heads we need. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	bhs = kmalloc(bhs_size, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	if (unlikely(!pages || !bhs || !completed_pages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 		kfree(bhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 		kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 		kfree(completed_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	 * We have already been given one page, this is the one we must do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	 * Once again, the alignment guarantees keep it simple.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	xpage = index - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	pages[xpage] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	 * The remaining pages need to be allocated and inserted into the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	 * cache, alignment guarantees keep all the below much simpler. (-8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	read_lock_irqsave(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	i_size = i_size_read(VFS_I(ni));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	initialized_size = ni->initialized_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	read_unlock_irqrestore(&ni->size_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 			offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	/* Is the page fully outside i_size? (truncate in progress) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	if (xpage >= max_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 		kfree(bhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		kfree(completed_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 		zero_user(page, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 		ntfs_debug("Compressed read outside i_size - truncated?");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	if (nr_pages < max_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		max_page = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	for (i = 0; i < max_page; i++, offset++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		if (i != xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 			pages[i] = grab_cache_page_nowait(mapping, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 		page = pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 			 * We only (re)read the page if it isn't already read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 			 * in and/or dirty or we would be losing data or at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 			 * least wasting our time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 			if (!PageDirty(page) && (!PageUptodate(page) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 					PageError(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 				ClearPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 				kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 			unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 			put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 			pages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	 * We have the runlist, and all the destination pages we need to fill.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	 * Now read the first compression block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	cur_page = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	cur_ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	cb_clusters = ni->itype.compressed.block_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) do_next_cb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	nr_cbs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	nr_bhs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	/* Read all cb buffer heads one cluster at a time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	rl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 			vcn++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 		bool is_retry = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		if (!rl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) lock_retry_remap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 			down_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 			rl = ni->runlist.rl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 		if (likely(rl != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 			/* Seek to element containing target vcn. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 			while (rl->length && rl[1].vcn <= vcn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 				rl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 			lcn = LCN_RL_NOT_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 				(unsigned long long)vcn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 				(unsigned long long)lcn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		if (lcn < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 			 * When we reach the first sparse cluster we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 			 * finished with the cb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 			if (lcn == LCN_HOLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 				goto rl_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 			is_retry = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 			 * Attempt to map runlist, dropping lock for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 			 * duration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 			up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 			if (!ntfs_map_runlist(ni, vcn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 				goto lock_retry_remap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 			goto map_rl_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 		block = lcn << vol->cluster_size_bits >> block_size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 		/* Read the lcn from device in chunks of block_size bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 		max_block = block + (vol->cluster_size >> block_size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 			ntfs_debug("block = 0x%x.", block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 				goto getblk_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 			nr_bhs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 		} while (++block < max_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	/* Release the lock if we took it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	if (rl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 		up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	/* Setup and initiate io on all buffer heads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		struct buffer_head *tbh = bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		if (!trylock_buffer(tbh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 		if (unlikely(buffer_uptodate(tbh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 			unlock_buffer(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 		get_bh(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		tbh->b_end_io = end_buffer_read_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 		submit_bh(REQ_OP_READ, 0, tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	/* Wait for io completion on all buffer heads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 		struct buffer_head *tbh = bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 		if (buffer_uptodate(tbh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 		wait_on_buffer(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 		 * We need an optimization barrier here, otherwise we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 		 * hitting the below fixup code when accessing a loopback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 		 * mounted ntfs partition. This indicates either there is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		 * race condition in the loop driver or, more likely, gcc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 		 * overoptimises the code without the barrier and it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		 * do the Right Thing(TM).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 		if (unlikely(!buffer_uptodate(tbh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 			ntfs_warning(vol->sb, "Buffer is unlocked but not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 					"uptodate! Unplugging the disk queue "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 					"and rescheduling.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 			get_bh(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 			io_schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 			put_bh(tbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 			if (unlikely(!buffer_uptodate(tbh)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 				goto read_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	 * Get the compression buffer. We must not sleep any more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	 * until we are finished with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	spin_lock(&ntfs_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	cb = ntfs_compression_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	BUG_ON(!cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	cb_pos = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	cb_end = cb + cb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	/* Copy the buffer heads into the contiguous buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	for (i = 0; i < nr_bhs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 		memcpy(cb_pos, bhs[i]->b_data, block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 		cb_pos += block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	/* Just a precaution. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	if (cb_pos + 2 <= cb + cb_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 		*(u16*)cb_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	/* Reset cb_pos back to the beginning. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	cb_pos = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	/* We now have both source (if present) and destination. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	ntfs_debug("Successfully read the compression block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	/* The last page and maximum offset within it for the current cb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	cb_max_ofs = cb_max_page & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	cb_max_page >>= PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	/* Catch end of file inside a compression block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	if (cb_max_page > max_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 		cb_max_page = max_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	if (vcn == start_vcn - cb_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 		/* Sparse cb, zero out page range overlapping the cb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 		ntfs_debug("Found sparse compression block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 		/* We can sleep from now on, so we drop lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 		spin_unlock(&ntfs_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 		if (cb_max_ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 			cb_max_page--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		for (; cur_page < cb_max_page; cur_page++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 			page = pages[cur_page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 			if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 				if (likely(!cur_ofs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 					clear_page(page_address(page));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 					memset(page_address(page) + cur_ofs, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 							PAGE_SIZE -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 							cur_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 				flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 				kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 				SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 				unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 				if (cur_page == xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 					xpage_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 					put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 				pages[cur_page] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 			cb_pos += PAGE_SIZE - cur_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 			cur_ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 			if (cb_pos >= cb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 		/* If we have a partial final page, deal with it now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 		if (cb_max_ofs && cb_pos < cb_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 			page = pages[cur_page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 			if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 				memset(page_address(page) + cur_ofs, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 						cb_max_ofs - cur_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 			 * No need to update cb_pos at this stage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 			 *	cb_pos += cb_max_ofs - cur_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 			cur_ofs = cb_max_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	} else if (vcn == start_vcn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		/* We can't sleep so we need two stages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 		unsigned int cur2_page = cur_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 		unsigned int cur_ofs2 = cur_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		u8 *cb_pos2 = cb_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 		ntfs_debug("Found uncompressed compression block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 		/* Uncompressed cb, copy it to the destination pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 		 * TODO: As a big optimization, we could detect this case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 		 * before we read all the pages and use block_read_full_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		 * on all full pages instead (we still have to treat partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		 * pages especially but at least we are getting rid of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		 * synchronous io for the majority of pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 		 * Or if we choose not to do the read-ahead/-behind stuff, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 		 * could just return block_read_full_page(pages[xpage]) as long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 		 * as PAGE_SIZE <= cb_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		if (cb_max_ofs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 			cb_max_page--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 		/* First stage: copy data into destination pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 		for (; cur_page < cb_max_page; cur_page++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 			page = pages[cur_page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 			if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 				memcpy(page_address(page) + cur_ofs, cb_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 						PAGE_SIZE - cur_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 			cb_pos += PAGE_SIZE - cur_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 			cur_ofs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 			if (cb_pos >= cb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 		/* If we have a partial final page, deal with it now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 		if (cb_max_ofs && cb_pos < cb_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 			page = pages[cur_page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 			if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 				memcpy(page_address(page) + cur_ofs, cb_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 						cb_max_ofs - cur_ofs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 			cb_pos += cb_max_ofs - cur_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 			cur_ofs = cb_max_ofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 		/* We can sleep from now on, so drop lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 		spin_unlock(&ntfs_cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 		/* Second stage: finalize pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 		for (; cur2_page < cb_max_page; cur2_page++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 			page = pages[cur2_page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 			if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 				 * If we are outside the initialized size, zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 				 * the out of bounds page range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 				handle_bounds_compressed_page(page, i_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 						initialized_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 				flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 				kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 				SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 				unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 				if (cur2_page == xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 					xpage_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 					put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 				pages[cur2_page] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 			cb_pos2 += PAGE_SIZE - cur_ofs2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 			cur_ofs2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 			if (cb_pos2 >= cb_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 		/* Compressed cb, decompress it into the destination page(s). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 		unsigned int prev_cur_page = cur_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 		ntfs_debug("Found compressed compression block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 		err = ntfs_decompress(pages, completed_pages, &cur_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 				&cur_ofs, cb_max_page, cb_max_ofs, xpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 				&xpage_done, cb_pos, cb_size - (cb_pos - cb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 				i_size, initialized_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 		 * We can sleep from now on, lock already dropped by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 		 * ntfs_decompress().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 					"0x%lx with error code %i. Skipping "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 					"this compression block.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 					ni->mft_no, -err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 			/* Release the unfinished pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 			for (; prev_cur_page < cur_page; prev_cur_page++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 				page = pages[prev_cur_page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 				if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 					flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 					kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 					unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 					if (prev_cur_page != xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 						put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 					pages[prev_cur_page] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	/* Release the buffer heads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	for (i = 0; i < nr_bhs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 		brelse(bhs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 	/* Do we have more work to do? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	if (nr_cbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 		goto do_next_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	/* We no longer need the list of buffer heads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 	kfree(bhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	/* Clean up if we have any pages left. Should never happen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 	for (cur_page = 0; cur_page < max_page; cur_page++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 		page = pages[cur_page];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 		if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 			ntfs_error(vol->sb, "Still have pages left! "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 					"Terminating them with extreme "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 					"prejudice.  Inode 0x%lx, page index "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 					"0x%lx.", ni->mft_no, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 			flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 			kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 			unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 			if (cur_page != xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 				put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 			pages[cur_page] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	/* We no longer need the list of pages. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 	kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 	kfree(completed_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 	/* If we have completed the requested page, we return success. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 	if (likely(xpage_done))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 	return err < 0 ? err : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) read_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	ntfs_error(vol->sb, "IO error while reading compressed data.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 	/* Release the buffer heads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	for (i = 0; i < nr_bhs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 		brelse(bhs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) map_rl_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 			"compression block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 	goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) rl_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 	up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 			"compression block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 	goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) getblk_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 	up_read(&ni->runlist.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 	kfree(bhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 	for (i = cur_page; i < max_page; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 		page = pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 		if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 			flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 			kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 			unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 			if (i != xpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 				put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) 	kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) 	kfree(completed_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }