^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2017 Marvell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Antoine Tenart <antoine.tenart@free-electrons.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/dmapool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <crypto/internal/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <crypto/internal/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <crypto/internal/skcipher.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "safexcel.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) static u32 max_rings = EIP197_MAX_RINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) module_param(max_rings, uint, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
/*
 * Set up (dummy) cache virtualization: map every interface/ring to the
 * same virtualization index so all rings share one record cache domain.
 */
static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	int i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	/* Also zero the global FLUE offset registers */
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) u32 addrmid, int *actbank)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) int curbank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) curbank = addrmid >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) if (curbank != *actbank) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) val = readl(priv->base + EIP197_CS_RAM_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) val = (val & ~EIP197_CS_BANKSEL_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) (curbank << EIP197_CS_BANKSEL_OFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) writel(val, priv->base + EIP197_CS_RAM_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) *actbank = curbank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
/*
 * Probe the size of a physically attached cache RAM by binary search:
 * write a unique marker at a candidate midpoint, overwrite every lower
 * power-of-two alias of that address with the inverted marker, then read
 * the midpoint back. If the marker survived, RAM extends at least that
 * far; otherwise the write aliased/failed and the RAM is smaller.
 * Returns the probed size in bytes (down to @stride granularity).
 * @probemask limits the compare to the writable data bits.
 */
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask, u32 stride)
{
	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
	int actbank;

	/*
	 * And probe the actual size of the physically attached cache data RAM
	 * Using a binary subdivision algorithm downto 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);	/* one past the max possible size */
	addrlo = 0;
	/* NOTE(review): min(maxbanks - 1, 0) is 0 for any maxbanks >= 1;
	 * presumably just "start from bank 0" — confirm intent. */
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > stride) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel(marker,
			priv->base + EIP197_CLASSIFICATION_RAMS +
			(addrmid & 0xffff));

		/* write invalid markers to possible aliases */
		delta = 1 << __fls(addrmid);
		while (delta >= stride) {
			addralias = addrmid - delta;
			eip197_trc_cache_banksel(priv, addralias, &actbank);
			writel(~marker,
			       priv->base + EIP197_CLASSIFICATION_RAMS +
			       (addralias & 0xffff));
			delta >>= 1;
		}

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if ((val & probemask) == marker)
			/* read back correct, continue with top half */
			addrlo = addrmid;
		else
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
	}
	return addrhi;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
/*
 * Initialize the record cache administration RAM: build the free-record
 * chain over @cs_rc_max records and reset all @cs_ht_wc hash table words.
 */
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		/* word 0: hash-chain next/prev pointers, both terminated */
		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		/*
		 * word 1: free-chain pointers linking record i to its
		 * neighbours; terminate the chain at the first and last
		 * record. (OR-ing works here presumably because
		 * EIP197_RC_NULL is an all-ones field value — see
		 * safexcel.h to confirm.)
		 */
		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)
/*
 * Probe the transform record cache data and admin RAM sizes, derive a
 * record/hash-table layout from them, clear the RAMs and program the
 * cache configuration registers.
 *
 * Return: 0 on success, -ENODEV if the probed RAM sizes are implausibly
 * small (probing failed).
 */
static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	/*
	 * Write all bank-select bits, then read back to see how many stuck;
	 * presumably unimplemented bits read back as zero, so this yields
	 * the number of banks — confirm against the EIP-197 datasheet.
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val&EIP197_CS_BANKSEL_MASK)>>EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

	/*
	 * Now probe the administration RAM size pretty much the same way
	 * Except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/* Sanity check probing results */
	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
			dsize, asize);
		return -ENODEV;
	}

	/*
	 * Determine optimal configuration from RAM sizes
	 * Note that we assume that the physical RAM configuration is sane
	 * Therefore, we don't do any parameter error checking here ...
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));

	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
/*
 * Prepare every processing engine for firmware download: configure the
 * token FIFOs, clear the ICE scratchpad RAM, reset the IFPP/IPUE
 * microengines and open up IFPP program memory for writing.
 */
static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFO's */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* bypass the OCE, if present */
		if (priv->flags & EIP197_OCE)
			writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
							EIP197_PE_DEBUG(pe));
	}

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) const struct firmware *fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) const __be32 *data = (const __be32 *)fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /* Write the firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) for (i = 0; i < fw->size / sizeof(u32); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) writel(be32_to_cpu(data[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) priv->base + EIP197_CLASSIFICATION_RAMS +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) i * sizeof(__be32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /* Exclude final 2 NOPs from size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return i - EIP197_FW_TERMINAL_NOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * If FW is actual production firmware, then poll for its initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * to complete and check if it is good for the HW, otherwise just return OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) int pe, pollcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) u32 base, pollofs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) if (fpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) pollofs = EIP197_FW_FPP_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) pollofs = EIP197_FW_PUE_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) for (pe = 0; pe < priv->config.pes; pe++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) base = EIP197_PE_ICE_SCRATCH_RAM(pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) pollcnt = EIP197_FW_START_POLLCNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) while (pollcnt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) (readl_relaxed(EIP197_PE(priv) + base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) pollofs) != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) pollcnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) if (!pollcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) fpp, pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
/*
 * Release the IFPP and IPUE microengines of every PE from reset and, for
 * production firmware, wait for the firmwares to report ready.
 * @ifppsz/@ipuesz are the respective image sizes in words (used to point
 * the engines at the end of the loaded program); @minifw selects the
 * minimal fallback firmware, which has no startup handshake.
 *
 * Return: true if (all) firmware started, false on startup timeout.
 */
static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
				EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) const struct firmware *fw[FW_NB];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) char fw_path[37], *dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) int i, j, ret = 0, pe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) int ipuesz, ifppsz, minifw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) if (priv->version == EIP197D_MRVL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) dir = "eip197d";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) else if (priv->version == EIP197B_MRVL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) priv->version == EIP197_DEVBRD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) dir = "eip197b";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) retry_fw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) for (i = 0; i < FW_NB; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) if (minifw || priv->version != EIP197B_MRVL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) goto release_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /* Fallback to the old firmware location for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * EIP197b.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) ret = firmware_request_nowarn(&fw[i], fw_name[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) goto release_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) eip197_init_firmware(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) /* Enable access to IPUE program memories */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) for (pe = 0; pe < priv->config.pes; pe++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) dev_dbg(priv->dev, "Firmware loaded successfully\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) release_fw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) release_firmware(fw[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (!minifw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) /* Retry with minifw path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) dir = "eip197_minifw";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) minifw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) goto retry_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) dev_dbg(priv->dev, "Firmware load failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) u32 cd_size_rnd, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) int i, cd_fetch_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) cd_size_rnd = (priv->config.cd_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) (BIT(priv->hwconfig.hwdataw) - 1)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) priv->hwconfig.hwdataw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) /* determine number of CD's we can fetch into the CD FIFO as 1 block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) if (priv->flags & SAFEXCEL_HW_EIP197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) (priv->config.pes * EIP197_FETCH_DEPTH));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /* for the EIP97, just fetch all that fits minus 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) cd_size_rnd) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * Since we're using command desc's way larger than formally specified,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * we need to check whether we can fit even 1 for low-end EIP196's!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (!cd_fetch_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) for (i = 0; i < priv->config.rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /* ring base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) writel(lower_32_bits(priv->ring[i].cdr.base_dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) writel(upper_32_bits(priv->ring[i].cdr.base_dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) (priv->config.cd_offset << 14) | priv->config.cd_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) writel(((cd_fetch_cnt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /* Configure DMA tx control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /* clear any pending interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) writel(GENMASK(5, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) u32 rd_size_rnd, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) int i, rd_fetch_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) /* determine number of RD's we can fetch into the FIFO as one block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) (BIT(priv->hwconfig.hwdataw) - 1)) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) priv->hwconfig.hwdataw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (priv->flags & SAFEXCEL_HW_EIP197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) /* EIP197: try to fetch enough in 1 go to keep all pipes busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) (priv->config.pes * EIP197_FETCH_DEPTH));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /* for the EIP97, just fetch all that fits minus 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) rd_size_rnd) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) for (i = 0; i < priv->config.rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) /* ring base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) writel(lower_32_bits(priv->ring[i].rdr.base_dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) writel(upper_32_bits(priv->ring[i].rdr.base_dma),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) priv->config.rd_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) writel(((rd_fetch_cnt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) /* Configure DMA tx control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) writel(val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /* clear any pending interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) writel(GENMASK(7, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /* enable ring interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) val |= EIP197_RDR_IRQ(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) int i, ret, pe, opbuflo, opbufhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) priv->config.pes, priv->config.rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * For EIP197's only set maximum number of TX commands to 2^5 = 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * Skip for the EIP97 as it does not have this field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) if (priv->flags & SAFEXCEL_HW_EIP197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /* Configure wr/rd cache values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /* Interrupts reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) /* Disable all global interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) /* Clear any pending interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /* Processing Engine configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) for (pe = 0; pe < priv->config.pes; pe++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) /* Data Fetch Engine configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) /* Reset all DFE threads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) writel(EIP197_DxE_THR_CTRL_RESET_PE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (priv->flags & EIP197_PE_ARB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /* Reset HIA input interface arbiter (if present) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) writel(EIP197_HIA_RA_PE_CTRL_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) /* DMA transfer size to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /* Leave the DFE threads reset state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) /* Configure the processing engine thresholds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) EIP197_PE_IN_xBUF_THRES_MAX(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) EIP197_PE_IN_xBUF_THRES_MAX(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (priv->flags & SAFEXCEL_HW_EIP197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /* enable HIA input interface arbiter and rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) writel(EIP197_HIA_RA_PE_CTRL_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) GENMASK(priv->config.rings - 1, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /* Data Store Engine configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /* Reset all DSE threads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) writel(EIP197_DxE_THR_CTRL_RESET_PE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) /* Wait for all DSE threads to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) GENMASK(15, 12)) != GENMASK(15, 12))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) /* DMA transfer size to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) if (priv->hwconfig.hwnumpes > 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) opbuflo = 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) opbufhi = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) opbuflo = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) opbufhi = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /* FIXME: instability issues can occur for EIP97 but disabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * it impacts performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (priv->flags & SAFEXCEL_HW_EIP197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) /* Leave the DSE threads reset state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /* Configure the procesing engine thresholds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /* Processing Engine configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) /* Token & context configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /* H/W capabilities selection: just enable everything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) writel(EIP197_FUNCTION_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) writel(EIP197_FUNCTION_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) /* Command Descriptor Rings prepare */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) for (i = 0; i < priv->config.rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /* Clear interrupts for this ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) writel(GENMASK(31, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) /* Disable external triggering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) /* Clear the pending prepared counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) writel(EIP197_xDR_PREP_CLR_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* Clear the pending processed counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) writel(EIP197_xDR_PROC_CLR_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) writel(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) writel(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) /* Result Descriptor Ring prepare */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) for (i = 0; i < priv->config.rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /* Disable external triggering*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) /* Clear the pending prepared counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) writel(EIP197_xDR_PREP_CLR_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) /* Clear the pending processed counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) writel(EIP197_xDR_PROC_CLR_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) writel(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) writel(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /* Ring size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) for (pe = 0; pe < priv->config.pes; pe++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* Enable command descriptor rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* Enable result descriptor rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* Clear any HIA interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (priv->flags & EIP197_SIMPLE_TRC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) writel(EIP197_STRC_CONFIG_INIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) priv->base + EIP197_STRC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) } else if (priv->flags & SAFEXCEL_HW_EIP197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ret = eip197_trc_cache_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (priv->flags & EIP197_ICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ret = eip197_load_firmwares(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return safexcel_hw_setup_cdesc_rings(priv) ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) safexcel_hw_setup_rdesc_rings(priv) ?:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* Called with ring's lock taken */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (!coal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* Configure when we want an interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct crypto_async_request *req, *backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct safexcel_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* If a request wasn't properly dequeued because of a lack of resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * proceeded it first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) req = priv->ring[ring].req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) backlog = priv->ring[ring].backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) goto handle_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_lock_bh(&priv->ring[ring].queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) backlog = crypto_get_backlog(&priv->ring[ring].queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) req = crypto_dequeue_request(&priv->ring[ring].queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) spin_unlock_bh(&priv->ring[ring].queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) priv->ring[ring].req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) priv->ring[ring].backlog = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) goto finalize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) handle_req:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ctx = crypto_tfm_ctx(req->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ret = ctx->send(req, ring, &commands, &results);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) goto request_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (backlog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) backlog->complete(backlog, -EINPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* In case the send() helper did not issue any command to push
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * to the engine because the input data was cached, continue to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * dequeue other requests as this is valid and not an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (!commands && !results)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) cdesc += commands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) rdesc += results;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) nreq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) request_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* Not enough resources to handle all the requests. Bail out and save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * the request and the backlog for the next dequeue call (per-ring).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) priv->ring[ring].req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) priv->ring[ring].backlog = backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) finalize:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (!nreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) spin_lock_bh(&priv->ring[ring].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) priv->ring[ring].requests += nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (!priv->ring[ring].busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) safexcel_try_push_requests(priv, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) priv->ring[ring].busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) spin_unlock_bh(&priv->ring[ring].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* let the RDR know we have pending descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) writel((rdesc * priv->config.rd_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* let the CDR know we have pending descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) writel((cdesc * priv->config.cd_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) void *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct safexcel_result_desc *rdesc = rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct result_data_desc *result_data = rdp + priv->config.res_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ((!rdesc->descriptor_overflow) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) (!rdesc->buffer_overflow) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) (!result_data->error_code))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (rdesc->descriptor_overflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_err(priv->dev, "Descriptor overflow detected");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (rdesc->buffer_overflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) dev_err(priv->dev, "Buffer overflow detected");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (result_data->error_code & 0x4066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* Fatal error (bits 1,2,5,6 & 14) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) dev_err(priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) "result descriptor error (%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) result_data->error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) } else if (result_data->error_code &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * Give priority over authentication fails:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * Blocksize, length & overflow errors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * something wrong with the input!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) } else if (result_data->error_code & BIT(9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* Authentication failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* All other non-fatal errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct safexcel_result_desc *rdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct crypto_async_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) priv->ring[ring].rdr_req[i] = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) inline struct crypto_async_request *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int i = safexcel_ring_first_rdr_index(priv, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return priv->ring[ring].rdr_req[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct safexcel_command_desc *cdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* Acknowledge the command descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (IS_ERR(cdesc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) dev_err(priv->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) "Could not retrieve the command descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) } while (!cdesc->last_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) void safexcel_inv_complete(struct crypto_async_request *req, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct safexcel_inv_result *result = req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (error == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) result->error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) complete(&result->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) int safexcel_invalidate_cache(struct crypto_async_request *async,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dma_addr_t ctxr_dma, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct safexcel_command_desc *cdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct safexcel_result_desc *rdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct safexcel_token *dmmy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* Prepare command descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) &dmmy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (IS_ERR(cdesc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return PTR_ERR(cdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) cdesc->control_data.type = EIP197_TYPE_EXTENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) cdesc->control_data.options = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* Prepare result descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (IS_ERR(rdesc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ret = PTR_ERR(rdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) goto cdesc_rollback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) safexcel_rdr_req_set(priv, ring, rdesc, async);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) cdesc_rollback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct crypto_async_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct safexcel_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) int ret, i, nreq, ndesc, tot_descs, handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) bool should_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) handle_results:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) tot_descs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (!nreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) goto requests_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) for (i = 0; i < nreq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) req = safexcel_rdr_req_get(priv, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ctx = crypto_tfm_ctx(req->tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ndesc = ctx->handle_result(priv, ring, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) &should_complete, &ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (ndesc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dev_err(priv->dev, "failed to handle result (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) ndesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) goto acknowledge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (should_complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) req->complete(req, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) tot_descs += ndesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) handled++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) acknowledge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) writel(EIP197_xDR_PROC_xD_PKT(i) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) (tot_descs * priv->config.rd_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* If the number of requests overflowed the counter, try to proceed more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) goto handle_results;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) requests_left:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) spin_lock_bh(&priv->ring[ring].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) priv->ring[ring].requests -= handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) safexcel_try_push_requests(priv, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!priv->ring[ring].requests)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) priv->ring[ring].busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) spin_unlock_bh(&priv->ring[ring].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static void safexcel_dequeue_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct safexcel_work_data *data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) container_of(work, struct safexcel_work_data, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) safexcel_dequeue(data->priv, data->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct safexcel_ring_irq_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct safexcel_crypto_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static irqreturn_t safexcel_irq_ring(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct safexcel_ring_irq_data *irq_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct safexcel_crypto_priv *priv = irq_data->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int ring = irq_data->ring, rc = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) u32 status, stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* RDR interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (status & EIP197_RDR_IRQ(ring)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (unlikely(stat & EIP197_xDR_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * Fatal error, the RDR is unusable and must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * reinitialized. This should not happen under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * normal circumstances.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dev_err(priv->dev, "RDR: fatal error.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) } else if (likely(stat & EIP197_xDR_THRESH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) rc = IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /* ACK the interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) writel(stat & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* ACK the interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct safexcel_ring_irq_data *irq_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct safexcel_crypto_priv *priv = irq_data->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int ring = irq_data->ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) safexcel_handle_result_descriptor(priv, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) queue_work(priv->ring[ring].workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) &priv->ring[ring].work_data.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static int safexcel_request_ring_irq(void *pdev, int irqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int is_pci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int ring_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) irq_handler_t handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) irq_handler_t threaded_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct safexcel_ring_irq_data *ring_irq_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int ret, irq, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct pci_dev *pci_pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) dev = &pci_pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) irq = pci_irq_vector(pci_pdev, irqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) irqid, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) } else if (IS_ENABLED(CONFIG_OF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct platform_device *plf_pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) char irq_name[6] = {0}; /* "ringX\0" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) snprintf(irq_name, 6, "ring%d", irqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) dev = &plf_pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) irq = platform_get_irq_byname(plf_pdev, irq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) irq_name, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = devm_request_threaded_irq(dev, irq, handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) threaded_handler, IRQF_ONESHOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) dev_name(dev), ring_irq_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) dev_err(dev, "unable to request IRQ %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /* Set affinity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) irq_set_affinity_hint(irq, get_cpu_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static struct safexcel_alg_template *safexcel_algs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) &safexcel_alg_ecb_des,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) &safexcel_alg_cbc_des,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) &safexcel_alg_ecb_des3_ede,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) &safexcel_alg_cbc_des3_ede,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) &safexcel_alg_ecb_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) &safexcel_alg_cbc_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) &safexcel_alg_cfb_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) &safexcel_alg_ofb_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) &safexcel_alg_ctr_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) &safexcel_alg_md5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) &safexcel_alg_sha1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) &safexcel_alg_sha224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) &safexcel_alg_sha256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) &safexcel_alg_sha384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) &safexcel_alg_sha512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) &safexcel_alg_hmac_md5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) &safexcel_alg_hmac_sha1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) &safexcel_alg_hmac_sha224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) &safexcel_alg_hmac_sha256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) &safexcel_alg_hmac_sha384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) &safexcel_alg_hmac_sha512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) &safexcel_alg_authenc_hmac_sha1_cbc_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) &safexcel_alg_authenc_hmac_sha224_cbc_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) &safexcel_alg_authenc_hmac_sha256_cbc_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) &safexcel_alg_authenc_hmac_sha384_cbc_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) &safexcel_alg_authenc_hmac_sha512_cbc_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) &safexcel_alg_authenc_hmac_sha1_ctr_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) &safexcel_alg_authenc_hmac_sha224_ctr_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) &safexcel_alg_authenc_hmac_sha256_ctr_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) &safexcel_alg_authenc_hmac_sha384_ctr_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) &safexcel_alg_authenc_hmac_sha512_ctr_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) &safexcel_alg_xts_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) &safexcel_alg_gcm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) &safexcel_alg_ccm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) &safexcel_alg_crc32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) &safexcel_alg_cbcmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) &safexcel_alg_xcbcmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) &safexcel_alg_cmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) &safexcel_alg_chacha20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) &safexcel_alg_chachapoly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) &safexcel_alg_chachapoly_esp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) &safexcel_alg_sm3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) &safexcel_alg_hmac_sm3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) &safexcel_alg_ecb_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) &safexcel_alg_cbc_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) &safexcel_alg_ofb_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) &safexcel_alg_cfb_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) &safexcel_alg_ctr_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) &safexcel_alg_authenc_hmac_sha1_cbc_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) &safexcel_alg_authenc_hmac_sm3_cbc_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) &safexcel_alg_authenc_hmac_sha1_ctr_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) &safexcel_alg_authenc_hmac_sm3_ctr_sm4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) &safexcel_alg_sha3_224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) &safexcel_alg_sha3_256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) &safexcel_alg_sha3_384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) &safexcel_alg_sha3_512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) &safexcel_alg_hmac_sha3_224,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) &safexcel_alg_hmac_sha3_256,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) &safexcel_alg_hmac_sha3_384,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) &safexcel_alg_hmac_sha3_512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) &safexcel_alg_authenc_hmac_sha1_cbc_des,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) &safexcel_alg_authenc_hmac_sha256_cbc_des,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) &safexcel_alg_authenc_hmac_sha224_cbc_des,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) &safexcel_alg_authenc_hmac_sha512_cbc_des,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) &safexcel_alg_authenc_hmac_sha384_cbc_des,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) &safexcel_alg_rfc4106_gcm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) &safexcel_alg_rfc4543_gcm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) &safexcel_alg_rfc4309_ccm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) int i, j, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) safexcel_algs[i]->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* Do we have all required base algorithms available? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) safexcel_algs[i]->algo_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* No, so don't register this ciphersuite */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /* Do we have all required base algorithms available? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) safexcel_algs[j]->algo_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* No, so don't unregister this ciphersuite */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* Do we have all required base algorithms available? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) safexcel_algs[i]->algo_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* No, so don't unregister this ciphersuite */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static void safexcel_configure(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) priv->config.pes = priv->hwconfig.hwnumpes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* Cannot currently support more rings than we have ring AICs! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) priv->config.rings = min_t(u32, priv->config.rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) priv->hwconfig.hwnumraic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /* res token is behind the descr, but ofs must be rounded to buswdth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* now the size of the descr is this 1st part plus the result struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) priv->config.rd_size = priv->config.res_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) EIP197_RD64_RESULT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* convert dwords to bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) priv->config.cd_offset *= sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) priv->config.cdsh_offset *= sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) priv->config.rd_offset *= sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) priv->config.res_offset *= sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct safexcel_register_offsets *offsets = &priv->offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (priv->flags & SAFEXCEL_HW_EIP197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) offsets->hia_aic = EIP197_HIA_AIC_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) offsets->hia_dfe = EIP197_HIA_DFE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) offsets->hia_dse = EIP197_HIA_DSE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) offsets->pe = EIP197_PE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) offsets->global = EIP197_GLOBAL_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) offsets->hia_aic = EIP97_HIA_AIC_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) offsets->hia_dfe = EIP97_HIA_DFE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) offsets->hia_dse = EIP97_HIA_DSE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) offsets->pe = EIP97_PE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) offsets->global = EIP97_GLOBAL_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * Generic part of probe routine, shared by platform and PCI driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * Assumes IO resources have been mapped, private data mem has been allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * clocks have been enabled, device pointer has been assigned etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static int safexcel_probe_generic(void *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct safexcel_crypto_priv *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) int is_pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) u32 peid, version, mask, val, hiaopt, hwopt, peopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int i, ret, hwctg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) priv->context_pool = dmam_pool_create("safexcel-context", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) sizeof(struct safexcel_context_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (!priv->context_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * First try the EIP97 HIA version regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * For the EIP197, this is guaranteed to NOT return any of the test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) mask = 0; /* do not swap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* read back byte-swapped, so complement byte swap bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /* So it wasn't an EIP97 ... maybe it's an EIP197? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) version = readl(priv->base + EIP197_HIA_AIC_BASE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) EIP197_HIA_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) priv->flags |= SAFEXCEL_HW_EIP197;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) } else if (EIP197_REG_HI16(version) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) EIP197_HIA_VERSION_BE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* read back byte-swapped, so complement swap bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) mask = EIP197_MST_CTRL_BYTE_SWAP_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) priv->flags |= SAFEXCEL_HW_EIP197;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Now initialize the reg offsets based on the probing info so far */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) safexcel_init_register_offsets(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * If the version was read byte-swapped, we need to flip the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * swapping Keep in mind here, though, that what we write will also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * byte-swapped ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) val = val ^ (mask >> 24); /* toggle byte swap bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * We're not done probing yet! We may fall through to here if no HIA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * was found at all. So, with the endianness presumably correct now and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * the offsets setup, *really* probe for the EIP97/EIP197.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (((priv->flags & SAFEXCEL_HW_EIP197) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * We did not find the device that matched our initial probing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * (or our initial probing failed) Report appropriate error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) hwctg = version >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) peid = version & 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /* Detect EIP206 processing pipe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) version = readl(EIP197_PE(priv) + + EIP197_PE_VERSION(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Detect EIP96 packet engine and version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) dev_err(dev, "EIP%d: EIP96 not detected.\n", peid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) priv->hwconfig.pever = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) priv->hwconfig.icever = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) priv->hwconfig.ocever = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) priv->hwconfig.psever = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (priv->flags & SAFEXCEL_HW_EIP197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* EIP197 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) EIP197_HWDATAW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) EIP197_CFSIZE_MASK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) EIP197_CFSIZE_ADJUST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) EIP197_RFSIZE_MASK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) EIP197_RFSIZE_ADJUST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) EIP197_N_PES_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) EIP197_N_RINGS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) priv->flags |= EIP197_PE_ARB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) priv->flags |= EIP197_ICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) /* Detect ICE EIP207 class. engine and version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) version = readl(EIP197_PE(priv) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) EIP197_PE_ICE_VERSION(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) peid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) priv->hwconfig.icever = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) priv->flags |= EIP197_OCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /* Detect EIP96PP packet stream editor and version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) priv->hwconfig.psever = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) /* Detect OCE EIP207 class. engine and version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) version = readl(EIP197_PE(priv) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) EIP197_PE_ICE_VERSION(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) peid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /* If not a full TRC, then assume simple TRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (!(hwopt & EIP197_OPT_HAS_TRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) priv->flags |= EIP197_SIMPLE_TRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /* EIP197 always has SOME form of TRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) priv->flags |= EIP197_TRC_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /* EIP97 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) EIP97_HWDATAW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) EIP97_CFSIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) EIP97_RFSIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) priv->hwconfig.hwnumpes = 1; /* by definition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) EIP197_N_RINGS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) /* Scan for ring AIC's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) version = readl(EIP197_HIA_AIC_R(priv) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) EIP197_HIA_AIC_R_VERSION(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) priv->hwconfig.hwnumraic = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /* Low-end EIP196 may not have any ring AIC's ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (!priv->hwconfig.hwnumraic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) dev_err(priv->dev, "No ring interrupt controller present!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /* Get supported algorithms from EIP96 transform engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) EIP197_PE_EIP96_OPTIONS(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) /* Print single info line describing what we just detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) priv->hwconfig.ppver, priv->hwconfig.pever,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) priv->hwconfig.algo_flags, priv->hwconfig.icever,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) priv->hwconfig.ocever, priv->hwconfig.psever);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) safexcel_configure(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * Request MSI vectors for global + 1 per ring -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * or just 1 for older dev images
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct pci_dev *pci_pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ret = pci_alloc_irq_vectors(pci_pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) priv->config.rings + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) priv->config.rings + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) PCI_IRQ_MSI | PCI_IRQ_MSIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dev_err(dev, "Failed to allocate PCI MSI interrupts\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* Register the ring IRQ handlers and configure the rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) priv->ring = devm_kcalloc(dev, priv->config.rings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) sizeof(*priv->ring),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (!priv->ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) for (i = 0; i < priv->config.rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) char wq_name[9] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) struct safexcel_ring_irq_data *ring_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ret = safexcel_init_ring_descriptors(priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) &priv->ring[i].cdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) &priv->ring[i].rdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) dev_err(dev, "Failed to initialize rings\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) priv->ring[i].rdr_req = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) EIP197_DEFAULT_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) sizeof(*priv->ring[i].rdr_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (!priv->ring[i].rdr_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (!ring_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ring_irq->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) ring_irq->ring = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) irq = safexcel_request_ring_irq(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) EIP197_IRQ_NUMBER(i, is_pci_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) is_pci_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) safexcel_irq_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) safexcel_irq_ring_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ring_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) priv->ring[i].irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) priv->ring[i].work_data.priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) priv->ring[i].work_data.ring = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) INIT_WORK(&priv->ring[i].work_data.work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) safexcel_dequeue_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) snprintf(wq_name, 9, "wq_ring%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) priv->ring[i].workqueue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) create_singlethread_workqueue(wq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (!priv->ring[i].workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) priv->ring[i].requests = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) priv->ring[i].busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) crypto_init_queue(&priv->ring[i].queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) EIP197_DEFAULT_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) spin_lock_init(&priv->ring[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) spin_lock_init(&priv->ring[i].queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) atomic_set(&priv->ring_used, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ret = safexcel_hw_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) dev_err(dev, "HW init failed (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) ret = safexcel_register_algorithms(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) dev_err(dev, "Failed to register algorithms (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) for (i = 0; i < priv->config.rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /* clear any pending interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* Reset the CDR base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /* Reset the RDR base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) /* for Device Tree platform driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static int safexcel_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct safexcel_crypto_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) priv->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) platform_set_drvdata(pdev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) priv->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (IS_ERR(priv->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) dev_err(dev, "failed to get resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return PTR_ERR(priv->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) priv->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) ret = PTR_ERR_OR_ZERO(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) /* The clock isn't mandatory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) ret = clk_prepare_enable(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) dev_err(dev, "unable to enable clk (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ret = PTR_ERR_OR_ZERO(priv->reg_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /* The clock isn't mandatory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (ret != -ENOENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) goto err_core_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) ret = clk_prepare_enable(priv->reg_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) dev_err(dev, "unable to enable reg clk (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) goto err_core_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) goto err_reg_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /* Generic EIP97/EIP197 device probing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ret = safexcel_probe_generic(pdev, priv, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) goto err_reg_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) err_reg_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) clk_disable_unprepare(priv->reg_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) err_core_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static int safexcel_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) safexcel_unregister_algorithms(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) safexcel_hw_reset_rings(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) clk_disable_unprepare(priv->reg_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) clk_disable_unprepare(priv->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) for (i = 0; i < priv->config.rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) irq_set_affinity_hint(priv->ring[i].irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) destroy_workqueue(priv->ring[i].workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static const struct of_device_id safexcel_of_match_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) .compatible = "inside-secure,safexcel-eip97ies",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) .data = (void *)EIP97IES_MRVL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) .compatible = "inside-secure,safexcel-eip197b",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) .data = (void *)EIP197B_MRVL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) .compatible = "inside-secure,safexcel-eip197d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) .data = (void *)EIP197D_MRVL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /* For backward compatibility and intended for generic use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) .compatible = "inside-secure,safexcel-eip97",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) .data = (void *)EIP97IES_MRVL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) .compatible = "inside-secure,safexcel-eip197",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) .data = (void *)EIP197B_MRVL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static struct platform_driver crypto_safexcel = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) .probe = safexcel_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) .remove = safexcel_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) .name = "crypto-safexcel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) .of_match_table = safexcel_of_match_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /* PCIE devices - i.e. Inside Secure development boards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) static int safexcel_pci_probe(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) struct safexcel_crypto_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) void __iomem *pciebase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) ent->vendor, ent->device, ent->subvendor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) ent->subdevice, ent->driver_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) priv = kzalloc(sizeof(*priv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) priv->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) priv->version = (enum safexcel_eip_version)ent->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) pci_set_drvdata(pdev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /* enable the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) rc = pcim_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) dev_err(dev, "Failed to enable PCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /* take ownership of PCI BAR0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) dev_err(dev, "Failed to map IO region for BAR0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) priv->base = pcim_iomap_table(pdev)[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (priv->version == EIP197_DEVBRD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) dev_err(dev, "Failed to map IO region for BAR4\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) pciebase = pcim_iomap_table(pdev)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) (val & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /* Setup MSI identity map mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) writel(EIP197_XLX_USER_VECT_LUT0_IDENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) writel(EIP197_XLX_USER_VECT_LUT1_IDENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) writel(EIP197_XLX_USER_VECT_LUT2_IDENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) writel(EIP197_XLX_USER_VECT_LUT3_IDENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /* Enable all device interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) writel(GENMASK(31, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) pciebase + EIP197_XLX_USER_INT_ENB_MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) dev_err(dev, "Unrecognised IRQ block identifier %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /* HW reset FPGA dev board */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) /* assert reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) writel(1, priv->base + EIP197_XLX_GPIO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) wmb(); /* maintain strict ordering for accesses here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) /* deassert reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) writel(0, priv->base + EIP197_XLX_GPIO_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) wmb(); /* maintain strict ordering for accesses here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) /* enable bus mastering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) /* Generic EIP97/EIP197 device probing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) rc = safexcel_probe_generic(pdev, priv, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static void safexcel_pci_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) safexcel_unregister_algorithms(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) for (i = 0; i < priv->config.rings; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) destroy_workqueue(priv->ring[i].workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) safexcel_hw_reset_rings(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static const struct pci_device_id safexcel_pci_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 0x16ae, 0xc522),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) .driver_data = EIP197_DEVBRD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) MODULE_DEVICE_TABLE(pci, safexcel_pci_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) static struct pci_driver safexcel_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) .name = "crypto-safexcel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) .id_table = safexcel_pci_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) .probe = safexcel_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) .remove = safexcel_pci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static int __init safexcel_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /* Register PCI driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) ret = pci_register_driver(&safexcel_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /* Register platform driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (IS_ENABLED(CONFIG_OF) && !ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ret = platform_driver_register(&crypto_safexcel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) pci_unregister_driver(&safexcel_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) static void __exit safexcel_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) /* Unregister platform driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (IS_ENABLED(CONFIG_OF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) platform_driver_unregister(&crypto_safexcel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /* Unregister PCI driver if successfully registered before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) pci_unregister_driver(&safexcel_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) module_init(safexcel_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) module_exit(safexcel_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) MODULE_IMPORT_NS(CRYPTO_INTERNAL);