^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * AMD Cryptographic Coprocessor (CCP) driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Author: Gary R Hook <gary.hook@amd.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/ccp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "ccp-dev.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) /* Allocate the requested number of contiguous LSB slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * from the LSB bitmap. Look in the private range for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * queue first; failing that, check the public area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * If no space is available, wait until some is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * Return: first slot number, or 0 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) */
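/* A return value of 0 means the allocation failed (the wait was interrupted).
 * Region 0 is skipped by ccp_find_lsb_regions() and its slots are marked used
 * by ccp_assign_lsbs(), so a successful allocation should never be slot 0;
 * that is what lets 0 double as the "nothing to free" marker in ccp_lsb_free().
 */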
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) struct ccp_device *ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) /* First look at the map for the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) if (cmd_q->lsb >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) LSB_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) 0, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) if (start < LSB_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) bitmap_set(cmd_q->lsbmap, start, count);
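/* Translate the region-local offset into a global LSB slot number */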
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) return start + cmd_q->lsb * LSB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* No joy; try to get an entry from the shared blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) ccp = cmd_q->ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) mutex_lock(&ccp->sb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) MAX_LSB_CNT * LSB_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) if (start < MAX_LSB_CNT * LSB_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) bitmap_set(ccp->lsbmap, start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) mutex_unlock(&ccp->sb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) ccp->sb_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) mutex_unlock(&ccp->sb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /* Wait for LSB entries to become available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* Free a number of LSB slots from the bitmap, starting at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * the indicated starting slot number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) if (!start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) if (cmd_q->lsb == start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /* An entry from the private LSB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) bitmap_clear(cmd_q->lsbmap, start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) /* From the shared LSBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) struct ccp_device *ccp = cmd_q->ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) mutex_lock(&ccp->sb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) bitmap_clear(ccp->lsbmap, start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) ccp->sb_avail = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) mutex_unlock(&ccp->sb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) wake_up_interruptible_all(&ccp->sb_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) /* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) union ccp_function {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) u16 size:7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) u16 encrypt:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) u16 mode:5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) u16 type:2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) } aes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) u16 size:7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) u16 encrypt:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) u16 rsvd:5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) u16 type:2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) } aes_xts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) u16 size:7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) u16 encrypt:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) u16 mode:5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) u16 type:2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) } des3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) u16 rsvd1:10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) u16 type:4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) u16 rsvd2:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) } sha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) u16 mode:3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) u16 size:12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) } rsa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) u16 byteswap:2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) u16 bitwise:3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) u16 reflect:2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) u16 rsvd:8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) } pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) u16 rsvd:13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) } zlib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) u16 size:10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) u16 type:2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) u16 mode:3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) } ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) u16 raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) };
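/* The engines interpret the descriptor's function field differently; the
 * structs above simply give named access to the same underlying value. Each
 * ccp5_perform_*() helper fills in the engine-specific bitfields and then
 * stores the combined "raw" value with CCP5_CMD_FUNCTION().
 */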
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define CCP_AES_SIZE(p) ((p)->aes.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define CCP_AES_MODE(p) ((p)->aes.mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define CCP_AES_TYPE(p) ((p)->aes.type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) #define CCP_XTS_TYPE(p) ((p)->aes_xts.type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define CCP_DES3_SIZE(p) ((p)->des3.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define CCP_DES3_MODE(p) ((p)->des3.mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define CCP_DES3_TYPE(p) ((p)->des3.type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define CCP_SHA_TYPE(p) ((p)->sha.type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #define CCP_RSA_SIZE(p) ((p)->rsa.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #define CCP_ECC_MODE(p) ((p)->ecc.mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define CCP_ECC_AFFINE(p) ((p)->ecc.one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) /* Word 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #define CCP5_CMD_DW0(p) ((p)->dw0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* Word 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define CCP5_CMD_DW1(p) ((p)->length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /* Word 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define CCP5_CMD_DW2(p) ((p)->src_lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) #define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) /* Word 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) #define CCP5_CMD_DW3(p) ((p)->dw3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) #define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) #define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) #define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) /* Words 4/5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) #define CCP5_CMD_DW4(p) ((p)->dw4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) /* Words 6/7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) #define CCP5_CMD_DW6(p) ((p)->key_lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define CCP5_CMD_DW7(p) ((p)->dw7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
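/* The macros above name the eight 32-bit words of a struct ccp5_desc by role
 * (control bits, length, source, destination and key addresses) so that the
 * ccp5_perform_*() helpers below read naturally.
 */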
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static inline u32 low_address(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return (u64)addr & 0x0ffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static inline u32 high_address(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) return ((u64)addr >> 32) & 0x00000ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
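/* Only 16 bits of the upper dword survive the mask above, i.e. addresses
 * handed to the hardware are assumed to fit in 48 bits.
 */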
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) unsigned int head_idx, n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) u32 head_lo, queue_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) queue_start = low_address(cmd_q->qdma_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) head_lo = ioread32(cmd_q->reg_head_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
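/* Example with hypothetical numbers: an 8-entry ring with head_idx == 2 and
 * qidx == 5 has three descriptors in flight, so n = 2 + 8 - 5 - 1 = 4 free
 * slots after the modulo below.
 */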
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static int ccp5_do_cmd(struct ccp5_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct ccp_cmd_queue *cmd_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) __le32 *mP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) u32 *dP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) u32 tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) cmd_q->total_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
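/* A stop-on-completion request is converted to interrupt-on-completion,
 * presumably so the driver can wait for the interrupt below instead of
 * leaving the queue halted.
 */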
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (CCP5_CMD_SOC(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) CCP5_CMD_IOC(desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) CCP5_CMD_SOC(desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) mutex_lock(&cmd_q->q_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) dP = (u32 *)desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) /* The data used by this command must be flushed to memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) /* Write the new tail address back to the queue register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) iowrite32(tail, cmd_q->reg_tail_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) /* Turn the queue back on using our cached control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) mutex_unlock(&cmd_q->q_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) if (CCP5_CMD_IOC(desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) /* Wait for the job to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) ret = wait_event_interruptible(cmd_q->int_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) cmd_q->int_rcvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) if (ret || cmd_q->cmd_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) /* Log the error and flush the queue by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * moving the head pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) if (cmd_q->cmd_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) ccp_log_error(cmd_q->ccp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) cmd_q->cmd_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) iowrite32(tail, cmd_q->reg_head_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) cmd_q->int_rcvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) static int ccp5_perform_aes(struct ccp_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct ccp5_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) union ccp_function function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) op->cmd_q->total_aes_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /* Zero out all the fields of the command desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) memset(&desc, 0, Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) CCP5_CMD_SOC(&desc) = op->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) CCP5_CMD_IOC(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) CCP5_CMD_INIT(&desc) = op->init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) CCP5_CMD_EOM(&desc) = op->eom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) CCP5_CMD_PROT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) function.raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) CCP_AES_ENCRYPT(&function) = op->u.aes.action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) CCP_AES_MODE(&function) = op->u.aes.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) CCP_AES_TYPE(&function) = op->u.aes.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) CCP_AES_SIZE(&function) = op->u.aes.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) CCP5_CMD_FUNCTION(&desc) = function.raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
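/* The AES key is expected to already sit in local storage (SB); point the
 * descriptor at its byte offset and at the LSB entry holding the context.
 */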
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) CCP5_CMD_KEY_HI(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) return ccp5_do_cmd(&desc, op->cmd_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) static int ccp5_perform_xts_aes(struct ccp_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) struct ccp5_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) union ccp_function function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) op->cmd_q->total_xts_aes_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) /* Zero out all the fields of the command desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) memset(&desc, 0, Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) CCP5_CMD_SOC(&desc) = op->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) CCP5_CMD_IOC(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) CCP5_CMD_INIT(&desc) = op->init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) CCP5_CMD_EOM(&desc) = op->eom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) CCP5_CMD_PROT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) function.raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) CCP_XTS_TYPE(&function) = op->u.xts.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) CCP5_CMD_FUNCTION(&desc) = function.raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) CCP5_CMD_KEY_HI(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) return ccp5_do_cmd(&desc, op->cmd_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) static int ccp5_perform_sha(struct ccp_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) struct ccp5_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) union ccp_function function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) op->cmd_q->total_sha_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) /* Zero out all the fields of the command desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) memset(&desc, 0, Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) CCP5_CMD_SOC(&desc) = op->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) CCP5_CMD_IOC(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) CCP5_CMD_INIT(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) CCP5_CMD_EOM(&desc) = op->eom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) CCP5_CMD_PROT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) function.raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) CCP_SHA_TYPE(&function) = op->u.sha.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) CCP5_CMD_FUNCTION(&desc) = function.raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (op->eom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) CCP5_CMD_SHA_LO(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) CCP5_CMD_SHA_HI(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return ccp5_do_cmd(&desc, op->cmd_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) static int ccp5_perform_des3(struct ccp_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) struct ccp5_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) union ccp_function function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) op->cmd_q->total_3des_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) /* Zero out all the fields of the command desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) memset(&desc, 0, sizeof(struct ccp5_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) CCP5_CMD_SOC(&desc) = op->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) CCP5_CMD_IOC(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) CCP5_CMD_INIT(&desc) = op->init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) CCP5_CMD_EOM(&desc) = op->eom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) CCP5_CMD_PROT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) function.raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) CCP_DES3_MODE(&function) = op->u.des3.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) CCP_DES3_TYPE(&function) = op->u.des3.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) CCP5_CMD_FUNCTION(&desc) = function.raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) CCP5_CMD_KEY_HI(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) return ccp5_do_cmd(&desc, op->cmd_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) static int ccp5_perform_rsa(struct ccp_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct ccp5_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) union ccp_function function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) op->cmd_q->total_rsa_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /* Zero out all the fields of the command desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) memset(&desc, 0, Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) CCP5_CMD_SOC(&desc) = op->soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) CCP5_CMD_IOC(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) CCP5_CMD_INIT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) CCP5_CMD_EOM(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) CCP5_CMD_PROT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) function.raw = 0;
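/* The size field carries the modulus length in bytes: mod_size, presumably
 * given in bits, is rounded up to whole bytes, e.g. 2048 bits -> 256 bytes.
 */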
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) CCP5_CMD_FUNCTION(&desc) = function.raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) /* Source is from external memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) /* Destination is in external memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) /* Key (Exponent) is in external memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return ccp5_do_cmd(&desc, op->cmd_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static int ccp5_perform_passthru(struct ccp_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct ccp5_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) union ccp_function function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) struct ccp_dma_info *saddr = &op->src.u.dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) struct ccp_dma_info *daddr = &op->dst.u.dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) op->cmd_q->total_pt_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) memset(&desc, 0, Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) CCP5_CMD_SOC(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) CCP5_CMD_IOC(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) CCP5_CMD_INIT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) CCP5_CMD_EOM(&desc) = op->eom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) CCP5_CMD_PROT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) function.raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) CCP5_CMD_FUNCTION(&desc) = function.raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* Take the length from whichever side resides in system memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (op->src.type == CCP_MEMTYPE_SYSTEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) CCP5_CMD_LEN(&desc) = saddr->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) CCP5_CMD_LEN(&desc) = daddr->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (op->src.type == CCP_MEMTYPE_SYSTEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) CCP5_CMD_LSB_ID(&desc) = op->sb_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) } else {
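/* The source is an LSB slot: convert the slot index to a byte offset within
 * local storage.
 */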
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) u32 key_addr = op->src.u.sb * CCP_SB_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) CCP5_CMD_SRC_HI(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) CCP5_CMD_DST_HI(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) return ccp5_do_cmd(&desc, op->cmd_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) static int ccp5_perform_ecc(struct ccp_op *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) struct ccp5_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) union ccp_function function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) op->cmd_q->total_ecc_ops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) /* Zero out all the fields of the command desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) memset(&desc, 0, Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) CCP5_CMD_SOC(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) CCP5_CMD_IOC(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) CCP5_CMD_INIT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) CCP5_CMD_EOM(&desc) = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) CCP5_CMD_PROT(&desc) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) function.raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) function.ecc.mode = op->u.ecc.function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) CCP5_CMD_FUNCTION(&desc) = function.raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return ccp5_do_cmd(&desc, op->cmd_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) int q_mask = 1 << cmd_q->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) int queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* Build a bit mask to know which LSBs this queue has access to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * Don't bother with segment 0 as it has special privileges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) */
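/* The status value holds one LSB_REGION_WIDTH-bit group per LSB region; bit
 * cmd_q->id within each group grants this queue access to that region.
 */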
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) for (j = 1; j < MAX_LSB_CNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (status & q_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) bitmap_set(cmd_q->lsbmask, j, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) status >>= LSB_REGION_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) cmd_q->id, queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) return queues ? 0 : -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) int lsb_cnt, int n_lsbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) unsigned long *lsb_pub)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) int bitno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) int qlsb_wgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /* For each queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * If the count of potential LSBs available to a queue matches the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) * ordinal given to us in lsb_cnt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * Copy the mask of possible LSBs for this queue into "qlsb";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * For each bit in qlsb, see if the corresponding bit in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * aggregation mask is set; if so, we have a match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) * If we have a match, clear the bit in the aggregation to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) * mark it as no longer available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) * If there is no match, clear the bit in qlsb and keep looking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) */
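/* Returns the number of public LSB regions still unassigned, or -EINVAL if a
 * queue whose access-mask weight equals lsb_cnt cannot be matched to any of
 * the remaining public regions.
 */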
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) if (qlsb_wgt == lsb_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) bitno = find_first_bit(qlsb, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) while (bitno < MAX_LSB_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (test_bit(bitno, lsb_pub)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) /* We found an available LSB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * that this queue can access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) cmd_q->lsb = bitno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) bitmap_clear(lsb_pub, bitno, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) dev_dbg(ccp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) "Queue %d gets LSB %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) i, bitno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) bitmap_clear(qlsb, bitno, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) bitno = find_first_bit(qlsb, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (bitno >= MAX_LSB_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) n_lsbs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) return n_lsbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /* For each queue, from the most- to least-constrained:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * find an LSB that can be assigned to the queue. If there are N queues that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * dedicated LSB. Remaining LSB regions become a shared resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * If we have fewer LSBs than queues, all LSB regions become shared resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) */
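/* Hypothetical example: with five queues that can collectively reach six LSB
 * regions, each queue can be given one private region (most-constrained
 * queues first), assuming the access masks allow it, and the remaining region
 * stays in the shared pool.
 */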
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) static int ccp_assign_lsbs(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) int n_lsbs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) int bitno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) int i, lsb_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) bitmap_zero(lsb_pub, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) /* Create an aggregate bitmap to get a total count of available LSBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) bitmap_or(lsb_pub,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) lsb_pub, ccp->cmd_q[i].lsbmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (n_lsbs >= ccp->cmd_q_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /* We have enough LSBs to give every queue a private LSB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * Brute force search to start with the queues that are more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * constrained in LSB choice. When an LSB is privately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * assigned, it is removed from the public mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * This is an ugly N squared algorithm with some optimization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) for (lsb_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) lsb_cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) lsb_pub);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) n_lsbs = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) /* What's left of the LSBs, according to the public mask, now become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * shared. Any zero bits in the lsb_pub mask represent an LSB region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * that can't be used as a shared resource, so mark the LSB slots for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * them as "in use".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) while (bitno < MAX_LSB_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) bitmap_set(qlsb, bitno, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) static void ccp5_irq_bh(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct ccp_device *ccp = (struct ccp_device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) status = ioread32(cmd_q->reg_interrupt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) cmd_q->int_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) cmd_q->q_status = ioread32(cmd_q->reg_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* On error, only save the first error value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if ((status & INT_ERROR) && !cmd_q->cmd_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) cmd_q->int_rcvd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* Acknowledge the interrupt and wake the kthread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) iowrite32(status, cmd_q->reg_interrupt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) wake_up_interruptible(&cmd_q->int_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
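/* Every queue has been checked; re-arm the interrupts that were disabled in
 * the top half (ccp5_irq_handler).
 */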
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) ccp5_enable_queue_interrupts(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static irqreturn_t ccp5_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct ccp_device *ccp = (struct ccp_device *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) ccp5_disable_queue_interrupts(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ccp->total_interrupts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (ccp->use_tasklet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) tasklet_schedule(&ccp->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) ccp5_irq_bh((unsigned long)ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static int ccp5_init(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct device *dev = ccp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) struct ccp_cmd_queue *cmd_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct dma_pool *dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) unsigned int qmr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) u64 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) u32 status_lo, status_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Find available queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) qmr = ioread32(ccp->io_regs + Q_MASK_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * Check for access to the registers. If this read returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * 0xffffffff, it's likely that the system is running a broken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * BIOS which disallows access to the device. Stop here and fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * the initialization (but not the load, as the PSP could get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * properly initialized).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (qmr == 0xffffffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (!(qmr & (1 << i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /* Allocate a dma pool for this queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) ccp->name, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) dma_pool = dma_pool_create(dma_pool_name, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) CCP_DMAPOOL_MAX_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) CCP_DMAPOOL_ALIGN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dev_err(dev, "unable to allocate dma pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) goto e_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ccp->cmd_q_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) cmd_q->ccp = ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) cmd_q->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) cmd_q->dma_pool = dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) mutex_init(&cmd_q->q_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* Page alignment satisfies our needs for N <= 128 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) &cmd_q->qbase_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!cmd_q->qbase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) dev_err(dev, "unable to allocate command queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) goto e_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) cmd_q->qidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Preset some register values and masks that are queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * number dependent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) cmd_q->reg_control = ccp->io_regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) CMD5_Q_STATUS_INCR * (i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) cmd_q->reg_int_enable = cmd_q->reg_control +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) CMD5_Q_INT_ENABLE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) cmd_q->reg_interrupt_status = cmd_q->reg_control +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) CMD5_Q_INTERRUPT_STATUS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) cmd_q->reg_int_status = cmd_q->reg_control +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) CMD5_Q_INT_STATUS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) cmd_q->reg_dma_status = cmd_q->reg_control +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) CMD5_Q_DMA_STATUS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) cmd_q->reg_dma_read_status = cmd_q->reg_control +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) CMD5_Q_DMA_READ_STATUS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cmd_q->reg_dma_write_status = cmd_q->reg_control +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) CMD5_Q_DMA_WRITE_STATUS_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) init_waitqueue_head(&cmd_q->int_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dev_dbg(dev, "queue #%u available\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (ccp->cmd_q_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) dev_notice(dev, "no command queues available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) goto e_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Turn off the queues and disable interrupts until ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ccp5_disable_queue_interrupts(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) cmd_q = &ccp->cmd_q[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) cmd_q->qcontrol = 0; /* Start with nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) ioread32(cmd_q->reg_int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ioread32(cmd_q->reg_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* Clear the interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) dev_dbg(dev, "Requesting an IRQ...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* Request an irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dev_err(dev, "unable to allocate an IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto e_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* Initialize the ISR tasklet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (ccp->use_tasklet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) (unsigned long)ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) dev_dbg(dev, "Loading LSB map...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* Copy the private LSB mask to the public registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
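/* Combine the two mask words into a single 64-bit map of the LSB regions
 * available to this CCP; ccp_find_lsb_regions() consumes it below.
 */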
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) status = ((u64)status_hi << 30) | (u64)status_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) dev_dbg(dev, "Configuring virtual queues...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* Configure size of each virtual queue accessible to host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u32 dma_addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) u32 dma_addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) cmd_q = &ccp->cmd_q[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
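/* Point both the head and tail registers at the start of the (empty)
 * descriptor ring; the high bits of the ring's DMA address are carried
 * in the queue control register.
 */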
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) cmd_q->qdma_tail = cmd_q->qbase_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) dma_addr_lo = low_address(cmd_q->qdma_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dma_addr_hi = high_address(cmd_q->qdma_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) cmd_q->qcontrol |= (dma_addr_hi << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* Find the LSB regions accessible to the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ccp_find_lsb_regions(cmd_q, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) cmd_q->lsb = -1; /* Unassigned value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) dev_dbg(dev, "Assigning LSBs...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ret = ccp_assign_lsbs(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) goto e_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* Optimization: pre-allocate LSB slots for each queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) dev_dbg(dev, "Starting threads...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Create a kthread for each queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct task_struct *kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) cmd_q = &ccp->cmd_q[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) "%s-q%u", ccp->name, cmd_q->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (IS_ERR(kthread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dev_err(dev, "error creating queue thread (%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) PTR_ERR(kthread));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ret = PTR_ERR(kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) goto e_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) cmd_q->kthread = kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) wake_up_process(kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) dev_dbg(dev, "Enabling interrupts...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) ccp5_enable_queue_interrupts(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) dev_dbg(dev, "Registering device...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* Put this on the unit list to make it available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ccp_add_device(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ret = ccp_register_rng(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) goto e_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* Register the DMA engine support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ret = ccp_dmaengine_register(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) goto e_hwrng;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) #ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* Set up debugfs entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ccp5_debugfs_setup(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) e_hwrng:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ccp_unregister_rng(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) e_kthread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (ccp->cmd_q[i].kthread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) kthread_stop(ccp->cmd_q[i].kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) e_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) sp_free_ccp_irq(ccp->sp, ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) e_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dma_pool_destroy(ccp->cmd_q[i].dma_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
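/* Tear down in roughly the reverse order of ccp5_init(): unregister the
 * DMA engine and RNG, remove the device from the unit list, quiesce the
 * queues, stop the kthreads, release the IRQ, and fail any commands still
 * queued or backlogged.
 */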
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static void ccp5_destroy(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct ccp_cmd_queue *cmd_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct ccp_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* Unregister the DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ccp_dmaengine_unregister(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /* Unregister the RNG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ccp_unregister_rng(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Remove this device from the list of available units first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ccp_del_device(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) #ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* We're in the process of tearing down the entire driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * when all the devices are gone, clean up debugfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ccp_present())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ccp5_debugfs_destroy();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* Disable and clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ccp5_disable_queue_interrupts(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) cmd_q = &ccp->cmd_q[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* Turn off the run bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* Clear the interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ioread32(cmd_q->reg_int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ioread32(cmd_q->reg_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Stop the queue kthreads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (ccp->cmd_q[i].kthread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) kthread_stop(ccp->cmd_q[i].kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) sp_free_ccp_irq(ccp->sp, ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Flush the cmd and backlog queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) while (!list_empty(&ccp->cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* Invoke the callback directly with an error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) list_del(&cmd->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) cmd->callback(cmd->data, -ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) while (!list_empty(&ccp->backlog)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Invoke the callback directly with an error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) list_del(&cmd->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) cmd->callback(cmd->data, -ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static void ccp5_config(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* Public side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
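/* Setup used for the variant described by ccpv5b: program the TRNG, AES
 * masks, queue mask/priority, command timeout, private LSB masks and clock
 * gating, then apply the common public-side configuration.
 */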
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static void ccp5other_config(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) u32 rnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /* We own all of the queues on the NTB CCP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
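/* Load the AES mask register with a series of values read from the TRNG */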
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) for (i = 0; i < 12; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ccp5_config(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* Version 5 adds some functions, but is essentially the same as v3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static const struct ccp_actions ccp5_actions = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .aes = ccp5_perform_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) .xts_aes = ccp5_perform_xts_aes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) .sha = ccp5_perform_sha,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) .des3 = ccp5_perform_des3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) .rsa = ccp5_perform_rsa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) .passthru = ccp5_perform_passthru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) .ecc = ccp5_perform_ecc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .sballoc = ccp_lsb_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .sbfree = ccp_lsb_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .init = ccp5_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .destroy = ccp5_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .get_free_slots = ccp5_get_free_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
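/* Per-device data: ccpv5a describes the primary v5 CCP; ccpv5b covers the
 * variant configured by ccp5other_config(), which also marks its DMA
 * channels as private (DMA_PRIVATE).
 */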
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) const struct ccp_vdata ccpv5a = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .version = CCP_VERSION(5, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) .setup = ccp5_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .perform = &ccp5_actions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .offset = 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) .rsamax = CCP5_RSA_MAX_WIDTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) const struct ccp_vdata ccpv5b = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) .version = CCP_VERSION(5, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .dma_chan_attr = DMA_PRIVATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) .setup = ccp5other_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) .perform = &ccp5_actions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) .offset = 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) .rsamax = CCP5_RSA_MAX_WIDTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) };