// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API for the NX-842 hardware compression.
 *
 * Copyright (C) IBM Corporation, 2011-2015
 *
 * Designer of the Power data compression engine:
 *   Bulent Abali <abali@us.ibm.com>
 *
 * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
 *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
 *
 * Rewrite: Dan Streetman <ddstreet@ieee.org>
 *
 * This is an interface to the NX-842 compression hardware in PowerPC
 * processors.  Most of the complexity of this driver is due to the fact that
 * the NX-842 compression hardware requires the input and output data buffers
 * to be specifically aligned, to have a length that is a specific multiple,
 * and to fall within specific minimum and maximum lengths.  Those
 * restrictions, provided by the nx-842 driver via nx842_constraints, mean
 * this driver must use bounce buffers and headers to compensate for
 * misaligned input or output buffers, and to split up input buffers that
 * are too large.
 *
 * This driver will fall back to software decompression if the hardware
 * decompression fails, so this driver's decompression should never fail as
 * long as the provided compressed buffer is valid.  Any compressed buffer
 * created by this driver will have a header (except when the input perfectly
 * matches the constraints), so users of this driver cannot simply pass such
 * a buffer to the 842 software decompression library.  Instead, users must
 * use this driver to decompress; if the hardware fails or is unavailable,
 * the compressed buffer will be parsed, the header removed, and the raw 842
 * buffer(s) passed to the 842 software decompression library.
 *
 * Compression, however, does not fall back to software, since the caller is
 * specifically requesting hardware compression; if the hardware compression
 * fails, the caller can fall back to software compression, and the raw 842
 * compressed buffer that the software compressor creates can be passed to
 * this driver for hardware decompression; any buffer without our specific
 * header magic is assumed to be a raw 842 buffer and is passed directly to
 * the hardware.  Note that the software compression library will produce a
 * compressed buffer that is incompatible with the hardware decompressor if
 * the original input buffer length is not a multiple of 8; if such a buffer
 * is passed to this driver for decompression, the hardware will reject it,
 * and this driver will then hand it to the software library for
 * decompression.
 */
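
/*
 * For reference, a compressed buffer that carries our header is laid out
 * roughly as follows (see struct nx842_crypto_header for the exact fields):
 *
 *   header: magic, ignore, groups, and one descriptor per group
 *   then, for each group:
 *     group[n].padding bytes to skip before that group's data (for the
 *     first group this count includes the header itself), followed by
 *     group[n].compressed_length bytes of raw 842 compressed data
 *
 * group[n].uncompressed_length records how many bytes the group decompresses
 * to, and the header's "ignore" field is the number of trailing bytes of the
 * last group's decompressed output that are padding and must be discarded.
 */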

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/vmalloc.h>
#include <linux/sw842.h>
#include <linux/spinlock.h>

#include "nx-842.h"

/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
 * template (see lib/842/842.h), so this magic number will never appear at
 * the start of a raw 842 compressed buffer.  That is important, as any
 * buffer passed to us without this magic is assumed to be a raw 842
 * compressed buffer, and passed directly to the hardware to decompress.
 */
#define NX842_CRYPTO_MAGIC              (0xf842)
#define NX842_CRYPTO_HEADER_SIZE(g)                             \
        (sizeof(struct nx842_crypto_header) +                   \
         sizeof(struct nx842_crypto_header_group) * (g))
#define NX842_CRYPTO_HEADER_MAX_SIZE                            \
        NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)

/* bounce buffer size */
#define BOUNCE_BUFFER_ORDER     (2)
#define BOUNCE_BUFFER_SIZE                                      \
        ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))

/* try longer on comp because we can fall back to sw decomp if hw is busy */
#define COMP_BUSY_TIMEOUT       (250) /* ms */
#define DECOMP_BUSY_TIMEOUT     (50) /* ms */

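/*
 * Per-request cursor for walking the input and output buffers: in/iremain
 * track the remaining source data, out/oremain the remaining destination
 * space, and ototal the total bytes written so far.  update_param() advances
 * the cursor by one group's consumed and produced sizes, failing if either
 * side would be overrun.
 */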
struct nx842_crypto_param {
        u8 *in;
        unsigned int iremain;
        u8 *out;
        unsigned int oremain;
        unsigned int ototal;
};

static int update_param(struct nx842_crypto_param *p,
                        unsigned int slen, unsigned int dlen)
{
        if (p->iremain < slen)
                return -EOVERFLOW;
        if (p->oremain < dlen)
                return -ENOSPC;

        p->in += slen;
        p->iremain -= slen;
        p->out += dlen;
        p->oremain -= dlen;
        p->ototal += dlen;

        return 0;
}

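/*
 * Allocate the per-tfm resources: the driver's working memory plus one
 * order-2 bounce buffer each for source and destination data, used whenever
 * the caller's buffers do not satisfy the hardware constraints.
 */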
int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
{
        struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);

        spin_lock_init(&ctx->lock);
        ctx->driver = driver;
        ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
        ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
        ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
        if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
                kfree(ctx->wmem);
                free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
                free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
                return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nx842_crypto_init);

void nx842_crypto_exit(struct crypto_tfm *tfm)
{
        struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);

        kfree(ctx->wmem);
        free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
        free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
}
EXPORT_SYMBOL_GPL(nx842_crypto_exit);

static void check_constraints(struct nx842_constraints *c)
{
        /* limit maximum, to always have enough bounce buffer to decompress */
        if (c->maximum > BOUNCE_BUFFER_SIZE)
                c->maximum = BOUNCE_BUFFER_SIZE;
}

static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
{
        int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);

        /* compress should have added space for header */
        if (s > be16_to_cpu(hdr->group[0].padding)) {
                pr_err("Internal error: no space for header\n");
                return -EINVAL;
        }

        memcpy(buf, hdr, s);

        print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);

        return 0;
}

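/*
 * Compress one group.  If the source does not satisfy the constraints
 * (alignment, length multiple, minimum length), it is padded and copied
 * through the source bounce buffer; likewise the output goes through the
 * destination bounce buffer when the caller's buffer cannot be used
 * directly.  hdrsize is the space reserved in front of the output for the
 * header (non-zero only for the first group, when a header is being added),
 * and *ignore returns how many padding bytes were appended to the source.
 * The hardware call is retried on -EBUSY until COMP_BUSY_TIMEOUT expires.
 */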
static int compress(struct nx842_crypto_ctx *ctx,
                    struct nx842_crypto_param *p,
                    struct nx842_crypto_header_group *g,
                    struct nx842_constraints *c,
                    u16 *ignore,
                    unsigned int hdrsize)
{
        unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
        unsigned int adj_slen = slen;
        u8 *src = p->in, *dst = p->out;
        int ret, dskip = 0;
        ktime_t timeout;

        if (p->iremain == 0)
                return -EOVERFLOW;

        if (p->oremain == 0 || hdrsize + c->minimum > dlen)
                return -ENOSPC;

        if (slen % c->multiple)
                adj_slen = round_up(slen, c->multiple);
        if (slen < c->minimum)
                adj_slen = c->minimum;
        if (slen > c->maximum)
                adj_slen = slen = c->maximum;
        if (adj_slen > slen || (u64)src % c->alignment) {
                adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
                slen = min(slen, BOUNCE_BUFFER_SIZE);
                if (adj_slen > slen)
                        memset(ctx->sbounce + slen, 0, adj_slen - slen);
                memcpy(ctx->sbounce, src, slen);
                src = ctx->sbounce;
                slen = adj_slen;
                pr_debug("using comp sbounce buffer, len %x\n", slen);
        }

        dst += hdrsize;
        dlen -= hdrsize;

        if ((u64)dst % c->alignment) {
                dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
                dst += dskip;
                dlen -= dskip;
        }
        if (dlen % c->multiple)
                dlen = round_down(dlen, c->multiple);
        if (dlen < c->minimum) {
nospc:
                dst = ctx->dbounce;
                dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
                dlen = round_down(dlen, c->multiple);
                dskip = 0;
                pr_debug("using comp dbounce buffer, len %x\n", dlen);
        }
        if (dlen > c->maximum)
                dlen = c->maximum;

        tmplen = dlen;
        timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
        do {
                dlen = tmplen; /* reset dlen, if we're retrying */
                ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
                /* possibly we should reduce the slen here, instead of
                 * retrying with the dbounce buffer?
                 */
                if (ret == -ENOSPC && dst != ctx->dbounce)
                        goto nospc;
        } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
        if (ret)
                return ret;

        dskip += hdrsize;

        if (dst == ctx->dbounce)
                memcpy(p->out + dskip, dst, dlen);

        g->padding = cpu_to_be16(dskip);
        g->compressed_length = cpu_to_be32(dlen);
        g->uncompressed_length = cpu_to_be32(slen);

        if (p->iremain < slen) {
                *ignore = slen - p->iremain;
                slen = p->iremain;
        }

        pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
                 slen, *ignore, dlen, dskip);

        return update_param(p, slen, dskip + dlen);
}

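/*
 * Entry point for compression.  The input is split into at most
 * NX842_CRYPTO_GROUP_MAX groups of no more than c.maximum bytes each, and a
 * header describing the groups is written in front of the output unless the
 * buffers already satisfy every constraint, in which case the output is a
 * single raw 842 group with no header.
 */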
int nx842_crypto_compress(struct crypto_tfm *tfm,
                          const u8 *src, unsigned int slen,
                          u8 *dst, unsigned int *dlen)
{
        struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
        struct nx842_crypto_header *hdr = &ctx->header;
        struct nx842_crypto_param p;
        struct nx842_constraints c = *ctx->driver->constraints;
        unsigned int groups, hdrsize, h;
        int ret, n;
        bool add_header;
        u16 ignore = 0;

        check_constraints(&c);

        p.in = (u8 *)src;
        p.iremain = slen;
        p.out = dst;
        p.oremain = *dlen;
        p.ototal = 0;

        *dlen = 0;

        groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
                       DIV_ROUND_UP(p.iremain, c.maximum));
        hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);

        spin_lock_bh(&ctx->lock);

        /* skip adding header if the buffers meet all constraints */
        add_header = (p.iremain % c.multiple ||
                      p.iremain < c.minimum ||
                      p.iremain > c.maximum ||
                      (u64)p.in % c.alignment ||
                      p.oremain % c.multiple ||
                      p.oremain < c.minimum ||
                      p.oremain > c.maximum ||
                      (u64)p.out % c.alignment);

        hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
        hdr->groups = 0;
        hdr->ignore = 0;

        while (p.iremain > 0) {
                n = hdr->groups++;
                ret = -ENOSPC;
                if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
                        goto unlock;

                /* header goes before first group */
                h = !n && add_header ? hdrsize : 0;

                if (ignore)
                        pr_warn("internal error, ignore is set %x\n", ignore);

                ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
                if (ret)
                        goto unlock;
        }

        if (!add_header && hdr->groups > 1) {
                pr_err("Internal error: No header but multiple groups\n");
                ret = -EINVAL;
                goto unlock;
        }

        /* ignore indicates the input stream needed to be padded */
        hdr->ignore = cpu_to_be16(ignore);
        if (ignore)
                pr_debug("marked %d bytes as ignore\n", ignore);

        if (add_header)
                ret = nx842_crypto_add_header(hdr, dst);
        if (ret)
                goto unlock;

        *dlen = p.ototal;

        pr_debug("compress total slen %x dlen %x\n", slen, *dlen);

unlock:
        spin_unlock_bh(&ctx->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nx842_crypto_compress);

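/*
 * Decompress one group.  The source and destination are bounced when the
 * constraints require it, and the hardware call is retried on -EBUSY until
 * DECOMP_BUSY_TIMEOUT expires.  If the hardware cannot be used or rejects
 * the buffer, fall back to the software 842 decompressor, which has no
 * alignment or length constraints.
 */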
static int decompress(struct nx842_crypto_ctx *ctx,
                      struct nx842_crypto_param *p,
                      struct nx842_crypto_header_group *g,
                      struct nx842_constraints *c,
                      u16 ignore)
{
        unsigned int slen = be32_to_cpu(g->compressed_length);
        unsigned int required_len = be32_to_cpu(g->uncompressed_length);
        unsigned int dlen = p->oremain, tmplen;
        unsigned int adj_slen = slen;
        u8 *src = p->in, *dst = p->out;
        u16 padding = be16_to_cpu(g->padding);
        int ret, spadding = 0;
        ktime_t timeout;

        if (!slen || !required_len)
                return -EINVAL;

        if (p->iremain <= 0 || padding + slen > p->iremain)
                return -EOVERFLOW;

        if (p->oremain <= 0 || required_len - ignore > p->oremain)
                return -ENOSPC;

        src += padding;

        if (slen % c->multiple)
                adj_slen = round_up(slen, c->multiple);
        if (slen < c->minimum)
                adj_slen = c->minimum;
        if (slen > c->maximum)
                goto usesw;
        if (slen < adj_slen || (u64)src % c->alignment) {
                /* we can append padding bytes because the 842 format defines
                 * an "end" template (see lib/842/842_decompress.c) and will
                 * ignore any bytes following it.
                 */
                if (slen < adj_slen)
                        memset(ctx->sbounce + slen, 0, adj_slen - slen);
                memcpy(ctx->sbounce, src, slen);
                src = ctx->sbounce;
                spadding = adj_slen - slen;
                slen = adj_slen;
                pr_debug("using decomp sbounce buffer, len %x\n", slen);
        }

        if (dlen % c->multiple)
                dlen = round_down(dlen, c->multiple);
        if (dlen < required_len || (u64)dst % c->alignment) {
                dst = ctx->dbounce;
                dlen = min(required_len, BOUNCE_BUFFER_SIZE);
                pr_debug("using decomp dbounce buffer, len %x\n", dlen);
        }
        if (dlen < c->minimum)
                goto usesw;
        if (dlen > c->maximum)
                dlen = c->maximum;

        tmplen = dlen;
        timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
        do {
                dlen = tmplen; /* reset dlen, if we're retrying */
                ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
        } while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
        if (ret) {
usesw:
                /* reset everything, sw doesn't have constraints */
                src = p->in + padding;
                slen = be32_to_cpu(g->compressed_length);
                spadding = 0;
                dst = p->out;
                dlen = p->oremain;
                if (dlen < required_len) { /* have ignore bytes */
                        dst = ctx->dbounce;
                        dlen = BOUNCE_BUFFER_SIZE;
                }
                pr_info_ratelimited("using software 842 decompression\n");
                ret = sw842_decompress(src, slen, dst, &dlen);
        }
        if (ret)
                return ret;

        slen -= spadding;

        dlen -= ignore;
        if (ignore)
                pr_debug("ignoring last %x bytes\n", ignore);

        if (dst == ctx->dbounce)
                memcpy(p->out, dst, dlen);

        pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
                 slen, padding, dlen, ignore);

        return update_param(p, slen + padding, dlen);
}

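/*
 * Entry point for decompression.  A buffer that does not begin with our
 * header magic is treated as a raw 842 stream and handed to decompress() as
 * a single synthetic group; otherwise the header is copied out and each
 * group it describes is decompressed in turn, with the "ignore" count
 * applied to the last group.
 */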
int nx842_crypto_decompress(struct crypto_tfm *tfm,
                            const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen)
{
        struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
        struct nx842_crypto_header *hdr;
        struct nx842_crypto_param p;
        struct nx842_constraints c = *ctx->driver->constraints;
        int n, ret, hdr_len;
        u16 ignore = 0;

        check_constraints(&c);

        p.in = (u8 *)src;
        p.iremain = slen;
        p.out = dst;
        p.oremain = *dlen;
        p.ototal = 0;

        *dlen = 0;

        hdr = (struct nx842_crypto_header *)src;

        spin_lock_bh(&ctx->lock);

        /* If it doesn't start with our header magic number, assume it's a raw
         * 842 compressed buffer and pass it directly to the hardware driver
         */
        if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
                struct nx842_crypto_header_group g = {
                        .padding =              0,
                        .compressed_length =    cpu_to_be32(p.iremain),
                        .uncompressed_length =  cpu_to_be32(p.oremain),
                };

                ret = decompress(ctx, &p, &g, &c, 0);
                if (ret)
                        goto unlock;

                goto success;
        }

        if (!hdr->groups) {
                pr_err("header has no groups\n");
                ret = -EINVAL;
                goto unlock;
        }
        if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
                pr_err("header has too many groups %x, max %x\n",
                       hdr->groups, NX842_CRYPTO_GROUP_MAX);
                ret = -EINVAL;
                goto unlock;
        }

        hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
        if (hdr_len > slen) {
                ret = -EOVERFLOW;
                goto unlock;
        }

        memcpy(&ctx->header, src, hdr_len);
        hdr = &ctx->header;

        for (n = 0; n < hdr->groups; n++) {
                /* ignore applies to last group */
                if (n + 1 == hdr->groups)
                        ignore = be16_to_cpu(hdr->ignore);

                ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
                if (ret)
                        goto unlock;
        }

success:
        *dlen = p.ototal;

        pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);

        ret = 0;

unlock:
        spin_unlock_bh(&ctx->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(nx842_crypto_decompress);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");