// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/crypto.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_debugfs.h"
#include "cc_cipher.h"
#include "cc_aead.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
#include "cc_fips.h"

bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(dump_desc, "Dump descriptors to kernel log as debugging aid");
bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(dump_bytes, "Dump buffers to kernel log as debugging aid");

static bool cc_sec_disable;
module_param_named(sec_disable, cc_sec_disable, bool, 0600);
MODULE_PARM_DESC(sec_disable, "Disable security functions");

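/*
 * struct cc_hw_data - per-revision hardware description used for matching
 * @name: marketing name of the HW revision (e.g. "712")
 * @rev: driver-internal revision identifier
 * @sig: expected HOST_SIGNATURE value (pre-713 parts)
 * @cidr_0123: expected concatenated COMPONENT_ID_0..3 value (713 and later)
 * @pidr_0124: expected concatenated PERIPHERAL_ID_0,1,2,4 value (713 and later)
 * @std_bodies: mask of supported standards bodies (NIST/OSCCA)
 */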
struct cc_hw_data {
	char *name;
	enum cc_hw_rev rev;
	u32 sig;
	u32 cidr_0123;
	u32 pidr_0124;
	int std_bodies;
};

#define CC_NUM_IDRS 4
#define CC_HW_RESET_LOOP_COUNT 10

/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
	CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
	CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
};

static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
	CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
	CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
};

/* Hardware revisions defs. */

/* The 703 is an OSCCA-only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
	.name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};

static const struct cc_hw_data cc713_hw = {
	.name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc712_hw = {
	.name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc710_hw = {
	.name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc630p_hw = {
	.name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
	.std_bodies = CC_STD_ALL
};

static const struct of_device_id arm_ccree_dev_of_match[] = {
	{ .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
	{ .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
	{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
	{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
	{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
	{}
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);

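/*
 * Read the low byte of each of the four given ID registers and assemble
 * them into a single 32-bit (little-endian) identification value.
 */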
static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
{
	int i;
	union {
		u8 regs[CC_NUM_IDRS];
		__le32 val;
	} idr;

	for (i = 0; i < CC_NUM_IDRS; ++i)
		idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);

	return le32_to_cpu(idr.val);
}

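/* Hex-dump a named byte buffer to the kernel log as a debugging aid. */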
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
	char prefix[64];

	if (!buf)
		return;

	snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);

	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
		       len, false);
}

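/*
 * Device interrupt handler: acknowledges the pending interrupt causes and
 * dispatches request completions, TEE FIPS notifications and AXI error
 * reports.
 */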
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irr;
	u32 imr;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
	/* If the driver is suspended, return; this is probably a shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);

	drvdata->irq = irr;
	/* Completion interrupt - most probable */
	if (irr & drvdata->comp_mask) {
		/* Mask all completion interrupts - will be unmasked in
		 * deferred service handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
		irr &= ~drvdata->comp_mask;
		complete_request(drvdata);
	}
#ifdef CONFIG_CRYPTO_FIPS
	/* TEE FIPS interrupt */
	if (irr & CC_GPR0_IRQ_MASK) {
		/* Mask interrupt - will be unmasked in Deferred service
		 * handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
		irr &= ~CC_GPR0_IRQ_MASK;
		fips_handler(drvdata);
	}
#endif
	/* AXI error interrupt */
	if (irr & CC_AXI_ERR_IRQ_MASK) {
		u32 axi_err;

		/* Read the AXI error ID */
		axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
			axi_err);

		irr &= ~CC_AXI_ERR_IRQ_MASK;
	}

	if (irr) {
		dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
				    irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}

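/*
 * Poll for HW reset completion. Only CC713-class parts expose an indication
 * (NVM_IS_IDLE); older revisions are assumed ready. Returns false if the
 * indication was not seen within the polling budget.
 */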
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
{
	unsigned int val;
	unsigned int i;

	/* 712/710/630 have no reset completion indication, always return true */
	if (drvdata->hw_rev <= CC_HW_REV_712)
		return true;

	for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
		/* in cc7x3 NVM_IS_IDLE indicates that the CC reset has
		 * completed and the device is fully functional
		 */
		val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
		if (val & CC_NVM_IS_IDLE_MASK) {
			/* HW indicates reset completed */
			return true;
		}
		/* allow other processes to be scheduled on the processor */
		schedule();
	}
	/* reset not completed */
	return false;
}

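/*
 * Program the basic register state the driver always needs: AXI interrupt
 * unmasking (pre-713 only), clearing and unmasking of the host interrupt
 * causes we handle, and the AXIM cache parameters. The is_probe flag only
 * selects the extra probe-time debug prints.
 */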
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
	unsigned int val, cache_params;
	struct device *dev = drvdata_to_dev(drvdata);

	/* Unmask all AXI interrupt sources in the AXI_CFG1 register */
	/* AXI interrupt config is obsolete starting with cc7x3 */
	if (drvdata->hw_rev <= CC_HW_REV_712) {
		val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
		cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
		dev_dbg(dev, "AXIM_CFG=0x%08X\n",
			cc_ioread(drvdata, CC_REG(AXIM_CFG)));
	}

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);

	/* Unmask relevant interrupt cause */
	val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;

	if (drvdata->hw_rev >= CC_HW_REV_712)
		val |= CC_GPR0_IRQ_MASK;

	cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);

	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);

	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_dbg(dev, "Cache params previous: 0x%08X\n", val);

	cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_dbg(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
			val, cache_params);

	return 0;
}

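/*
 * Acquire and initialize all device resources at probe time: drvdata
 * allocation, clock, register mapping, IRQ, DMA mask, HW identification,
 * and the driver sub-modules (debugfs, FIPS, SRAM, request and buffer
 * managers, cipher/hash/aead algorithm registration).
 */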
static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 val, hw_rev_pidr, sig_cidr;
	u64 dma_mask;
	const struct cc_hw_data *hw_rev;
	struct clk *clk;
	int irq;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	hw_rev = of_device_get_match_data(dev);
	new_drvdata->hw_rev_name = hw_rev->name;
	new_drvdata->hw_rev = hw_rev->rev;
	new_drvdata->std_bodies = hw_rev->std_bodies;

	if (hw_rev->rev >= CC_HW_REV_712) {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
	} else {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
	}

	new_drvdata->comp_mask = CC_COMP_IRQ_MASK;

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");
	new_drvdata->clk = clk;

	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	/* Map registers space */
	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(new_drvdata->cc_base);
	}

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	irq = platform_get_irq(plat_dev, 0);
	if (irq < 0)
		return irq;

	init_completion(&new_drvdata->hw_queue_avail);

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

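	/*
	 * Pick the widest coherent DMA mask the platform accepts, starting
	 * from DMA_BIT_MASK_LEN bits and narrowing one bit at a time
	 * (never below 32 bits).
	 */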
	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(dev, dma_mask)) {
			rc = dma_set_coherent_mask(dev, dma_mask);
			if (!rc)
				break;
		}
		dma_mask >>= 1;
	}

	if (rc) {
		dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask);
		return rc;
	}

	rc = clk_prepare_enable(new_drvdata->clk);
	if (rc) {
		dev_err(dev, "Failed to enable clock");
		return rc;
	}

	new_drvdata->sec_disabled = cc_sec_disable;

	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
		goto post_pm_err;
	}

	/* Wait for Cryptocell reset completion */
	if (!cc_wait_for_reset_completion(new_drvdata)) {
		dev_err(dev, "Cryptocell reset not completed");
	}

	if (hw_rev->rev <= CC_HW_REV_712) {
		/* Verify correct mapping */
		val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
		if (val != hw_rev->sig) {
			dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
				val, hw_rev->sig);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;
		hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
	} else {
		/* Verify correct mapping */
		val = cc_read_idr(new_drvdata, pidr_0124_offsets);
		if (val != hw_rev->pidr_0124) {
			dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
				val, hw_rev->pidr_0124);
			rc = -EINVAL;
			goto post_pm_err;
		}
		hw_rev_pidr = val;

		val = cc_read_idr(new_drvdata, cidr_0123_offsets);
		if (val != hw_rev->cidr_0123) {
			dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
				val, hw_rev->cidr_0123);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;

		/* Check HW engine configuration */
		val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
		switch (val) {
		case CC_PINS_FULL:
			/* This is fine */
			break;
		case CC_PINS_SLIM:
			if (new_drvdata->std_bodies & CC_STD_NIST) {
				dev_warn(dev, "703 mode forced due to HW configuration.\n");
				new_drvdata->std_bodies = CC_STD_OSCCA;
			}
			break;
		default:
			dev_err(dev, "Unsupported engines configuration.\n");
			rc = -EINVAL;
			goto post_pm_err;
		}

		/* Check security disable state */
		val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
		val &= CC_SECURITY_DISABLED_MASK;
		new_drvdata->sec_disabled |= !!val;

		if (!new_drvdata->sec_disabled) {
			new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
			if (new_drvdata->std_bodies & CC_STD_NIST)
				new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
		}
	}

	if (new_drvdata->sec_disabled)
		dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
		 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
			      new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_pm_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	rc = init_cc_regs(new_drvdata, true);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_pm_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
		goto post_debugfs_err;
	}
	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto post_fips_init_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_fips_init_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_buf_mgr_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_cipher_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_hash_err;
	}

	/* If we got here and FIPS mode is enabled,
	 * it means all FIPS tests passed, so let the TEE
	 * know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	pm_runtime_put(dev);
	return 0;

post_hash_err:
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_pm_err:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(new_drvdata->clk);
	return rc;
}

void fini_cc_regs(struct cc_drvdata *drvdata)
{
	/* Mask all interrupts */
	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}

static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;
	struct cc_drvdata *drvdata =
		(struct cc_drvdata *)platform_get_drvdata(plat_dev);

	cc_aead_free(drvdata);
	cc_hash_free(drvdata);
	cc_cipher_free(drvdata);
	cc_buffer_mgr_fini(drvdata);
	cc_req_mgr_fini(drvdata);
	cc_fips_fini(drvdata);
	cc_debugfs_fini(drvdata);
	fini_cc_regs(drvdata);
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(drvdata->clk);
}

unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		return HASH_LEN_SIZE_712;
	else
		return HASH_LEN_SIZE_630;
}

static int ccree_probe(struct platform_device *plat_dev)
{
	int rc;
	struct device *dev = &plat_dev->dev;

	/* Map registers space */
	rc = init_cc_resources(plat_dev);
	if (rc)
		return rc;

	dev_info(dev, "ARM ccree device initialized\n");

	return 0;
}

static int ccree_remove(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;

	dev_dbg(dev, "Releasing ccree resources...\n");

	cleanup_cc_resources(plat_dev);

	dev_info(dev, "ARM ccree device terminated\n");

	return 0;
}

static struct platform_driver ccree_driver = {
	.driver = {
		.name = "ccree",
		.of_match_table = arm_ccree_dev_of_match,
#ifdef CONFIG_PM
		.pm = &ccree_pm,
#endif
	},
	.probe = ccree_probe,
	.remove = ccree_remove,
};

static int __init ccree_init(void)
{
	cc_debugfs_global_init();

	return platform_driver_register(&ccree_driver);
}
module_init(ccree_init);

static void __exit ccree_exit(void)
{
	platform_driver_unregister(&ccree_driver);
	cc_debugfs_global_fini();
}
module_exit(ccree_exit);

/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");