Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

fs/erofs/decompressor.c (EROFS LZ4 decompressor)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX 65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

struct z_erofs_decompressor {
	/*
	 * if destpages have sparse (NULL) entries, fill them with bounce pages.
	 * it also checks whether destpages indicate contiguous physical memory.
	 */
	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
				 struct list_head *pagepool);
	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
	char *name;
};

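/*
 * Parse the optional on-disk LZ4 configuration: validate the advertised
 * physical cluster size, derive how many pages the LZ4 match window may
 * span (max_distance_pages) and grow the per-CPU buffers so that a whole
 * pcluster fits whenever compressed data has to be copied out of place.
 */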
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

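/*
 * Fill holes (NULL slots) in the output page vector so that LZ4 always has
 * a valid destination page to write to. A sliding window of
 * lz4.max_distance_pages entries tracks which slots were filled with bounce
 * pages: once such a page falls out of the LZ4 match window it is recycled
 * for the next hole, otherwise a fresh short-lived page is allocated.
 * Returns 1 if all output pages turned out to be physically consecutive,
 * so the caller may write to them directly via page_address().
 */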
static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
					 struct list_head *pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

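/*
 * Choose how the compressed source is mapped and report it via *maptype:
 *   0 - the single input page is used as-is (still kmapped by the caller),
 *   1 - the input pages are mapped contiguously with erofs_vm_map_ram(),
 *   2 - the data is copied into a per-CPU buffer because safe in-place
 *       decompression is not possible (partial decoding, no 0padding,
 *       insufficient margin or overlapping input/output pages).
 */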
static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* otherwise, copy the possibly-overlapping compressed data to the per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

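/*
 * LZ4 decompression entry. With the 0padding feature the compressed stream
 * is stored right-justified in the pcluster, so the leading zero bytes are
 * skipped first; the exact compressed size is then known and
 * LZ4_decompress_safe() can be used. Legacy images without 0padding (and
 * partial decompression requests) go through LZ4_decompress_safe_partial()
 * instead.
 */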
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype,
					support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

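/* dispatch table indexed by the per-pcluster algorithm id (rq->alg) */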
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.prepare_destpages = z_erofs_lz4_prepare_destpages,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
};

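/*
 * Scatter decompressed data from the linear per-CPU buffer back into the
 * (possibly sparse) output page vector, honouring the byte offset of the
 * first page; NULL slots are simply skipped.
 */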
static void copy_from_pcpubuf(struct page **out, const char *dst,
			      unsigned short pageofs_out,
			      unsigned int outputsize)
{
	const char *end = dst + outputsize;
	const unsigned int righthalf = PAGE_SIZE - pageofs_out;
	const char *cur = dst - pageofs_out;

	while (cur < end) {
		struct page *const page = *out++;

		if (page) {
			char *buf = kmap_atomic(page);

			if (cur >= dst) {
				memcpy(buf, cur, min_t(uint, PAGE_SIZE,
						       end - cur));
			} else {
				memcpy(buf + pageofs_out, cur + pageofs_out,
				       min_t(uint, righthalf, end - cur));
			}
			kunmap_atomic(buf);
		}
		cur += PAGE_SIZE;
	}
}

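/*
 * Generic decompression path. For single-page input there are two fast
 * paths: decompress straight into the only output page, or into a per-CPU
 * buffer which is then scattered with copy_from_pcpubuf(). Otherwise the
 * output pages are prepared with bounce pages and mapped either directly
 * (when physically consecutive) or through erofs_vm_map_ram().
 */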
static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
				      struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* two optimized fast paths, currently only for non-bigpcluster cases */
	if (rq->inputsize <= PAGE_SIZE) {
		if (nrpages_out == 1 && !rq->inplace_io) {
			DBG_BUGON(!*rq->out);
			dst = kmap_atomic(*rq->out);
			dst_maptype = 0;
			goto dstmap_out;
		}

		/*
		 * For small output sizes (especially much less than
		 * PAGE_SIZE), it is preferable to memcpy the decompressed
		 * data rather than the compressed data.
		 */
		if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
			dst = erofs_get_pcpubuf(1);
			if (IS_ERR(dst))
				return PTR_ERR(dst);

			rq->inplace_io = false;
			ret = alg->decompress(rq, dst);
			if (!ret)
				copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
						  rq->outputsize);

			erofs_put_pcpubuf(dst);
			return ret;
		}
	}

	/* general decoding path which can be used for all cases */
	ret = alg->prepare_destpages(rq, pagepool);
	if (ret < 0)
		return ret;
	if (ret) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = alg->decompress(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

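/*
 * "shifted" pclusters keep the data uncompressed; it only needs to be moved
 * to the right offset within at most two output pages, including the case
 * where an output page is the input page itself (in-place).
 */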
static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
				     struct list_head *pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct list_head *pagepool)
{
	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
		return z_erofs_shifted_transform(rq, pagepool);
	return z_erofs_decompress_generic(rq, pagepool);
}