Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * AMD Cryptographic Coprocessor (CCP) driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Author: Tom Lendacky <thomas.lendacky@amd.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Author: Gary R Hook <gary.hook@amd.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/spinlock_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/hw_random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/cpu_device_id.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/ccp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include "ccp-dev.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
/* Upper bound on the number of CCP devices this driver will manage */
#define MAX_CCPS 32

/* Limit CCP use to a specified number of queues per device */
static unsigned int nqueues = 0;
module_param(nqueues, uint, 0444);
MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)");

/* Limit the maximum number of configured CCPs */
static atomic_t dev_count = ATOMIC_INIT(0);	/* CCPs configured so far */
static unsigned int max_devs = MAX_CCPS;
module_param(max_devs, uint, 0444);
MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)");

/* Per-command context handed to the completion tasklet by the queue thread */
struct ccp_tasklet_data {
	struct completion completion;	/* signalled once the callback has run */
	struct ccp_cmd *cmd;		/* the command being completed */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
/* Human-readable error strings, indexed by hardware error code.
 * The table intentionally covers fewer than CCP_MAX_ERROR_CODE entries;
 * codes past the end of the array are reported as "Unknown Error"
 * by ccp_log_error().
 */
#define CCP_MAX_ERROR_CODE	64
static char *ccp_error_codes[] = {
	"",
	"ILLEGAL_ENGINE",
	"ILLEGAL_KEY_ID",
	"ILLEGAL_FUNCTION_TYPE",
	"ILLEGAL_FUNCTION_MODE",
	"ILLEGAL_FUNCTION_ENCRYPT",
	"ILLEGAL_FUNCTION_SIZE",
	"Zlib_MISSING_INIT_EOM",
	"ILLEGAL_FUNCTION_RSVD",
	"ILLEGAL_BUFFER_LENGTH",
	"VLSB_FAULT",
	"ILLEGAL_MEM_ADDR",
	"ILLEGAL_MEM_SEL",
	"ILLEGAL_CONTEXT_ID",
	"ILLEGAL_KEY_ADDR",
	"0xF Reserved",
	"Zlib_ILLEGAL_MULTI_QUEUE",
	"Zlib_ILLEGAL_JOBID_CHANGE",
	"CMD_TIMEOUT",
	"IDMA0_AXI_SLVERR",
	"IDMA0_AXI_DECERR",
	"0x15 Reserved",
	"IDMA1_AXI_SLAVE_FAULT",
	"IDMA1_AIXI_DECERR",
	"0x18 Reserved",
	"ZLIBVHB_AXI_SLVERR",
	"ZLIBVHB_AXI_DECERR",
	"0x1B Reserved",
	"ZLIB_UNEXPECTED_EOM",
	"ZLIB_EXTRA_DATA",
	"ZLIB_BTYPE",
	"ZLIB_UNDEFINED_SYMBOL",
	"ZLIB_UNDEFINED_DISTANCE_S",
	"ZLIB_CODE_LENGTH_SYMBOL",
	"ZLIB _VHB_ILLEGAL_FETCH",
	"ZLIB_UNCOMPRESSED_LEN",
	"ZLIB_LIMIT_REACHED",
	"ZLIB_CHECKSUM_MISMATCH0",
	"ODMA0_AXI_SLVERR",
	"ODMA0_AXI_DECERR",
	"0x28 Reserved",
	"ODMA1_AXI_SLVERR",
	"ODMA1_AXI_DECERR",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) void ccp_log_error(struct ccp_device *d, unsigned int e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	if (e < ARRAY_SIZE(ccp_error_codes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit-lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);		/* all registered CCP devices */

/* Round-robin counter: ccp_rr points at the next unit to hand out;
 * protected by ccp_rr_lock (taken under a read-held ccp_unit_lock).
 */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)  * ccp_add_device - add a CCP device to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)  * @ccp: ccp_device struct pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)  * Put this CCP on the unit list, which makes it available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)  * for use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)  * Returns zero if a CCP device is present, -ENODEV otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) void ccp_add_device(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	write_lock_irqsave(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	list_add_tail(&ccp->entry, &ccp_units);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	if (!ccp_rr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		/* We already have the list lock (we're first) so this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 		 * pointer can't change on us. Set its initial value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		ccp_rr = ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	write_unlock_irqrestore(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)  * ccp_del_device - remove a CCP device from the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)  * @ccp: ccp_device struct pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  * Remove this unit from the list of devices. If the next device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  * up for use is this one, adjust the pointer. If this is the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)  * device, NULL the pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) void ccp_del_device(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	write_lock_irqsave(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	if (ccp_rr == ccp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 		/* ccp_unit_lock is read/write; any read access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 		 * will be suspended while we make changes to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		 * list and RR pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 		if (list_is_last(&ccp_rr->entry, &ccp_units))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 						  entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 			ccp_rr = list_next_entry(ccp_rr, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	list_del(&ccp->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	if (list_empty(&ccp_units))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 		ccp_rr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	write_unlock_irqrestore(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) int ccp_register_rng(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	dev_dbg(ccp->dev, "Registering RNG...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	/* Register an RNG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	ccp->hwrng.name = ccp->rngname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	ccp->hwrng.read = ccp_trng_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	ret = hwrng_register(&ccp->hwrng);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) void ccp_unregister_rng(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	if (ccp->hwrng.name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		hwrng_unregister(&ccp->hwrng);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static struct ccp_device *ccp_get_device(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	struct ccp_device *dp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	/* We round-robin through the unit list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	 * The (ccp_rr) pointer refers to the next unit to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	read_lock_irqsave(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	if (!list_empty(&ccp_units)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 		spin_lock(&ccp_rr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 		dp = ccp_rr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		if (list_is_last(&ccp_rr->entry, &ccp_units))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 						  entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 			ccp_rr = list_next_entry(ccp_rr, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		spin_unlock(&ccp_rr_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	read_unlock_irqrestore(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	return dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  * ccp_present - check if a CCP device is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)  * Returns zero if a CCP device is present, -ENODEV otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) int ccp_present(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	read_lock_irqsave(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	ret = list_empty(&ccp_units);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	read_unlock_irqrestore(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	return ret ? -ENODEV : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) EXPORT_SYMBOL_GPL(ccp_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)  * ccp_version - get the version of the CCP device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)  * Returns the version from the first unit on the list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)  * otherwise a zero if no CCP device is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) unsigned int ccp_version(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	struct ccp_device *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	read_lock_irqsave(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	if (!list_empty(&ccp_units)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		ret = dp->vdata->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	read_unlock_irqrestore(&ccp_unit_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) EXPORT_SYMBOL_GPL(ccp_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

	/* Some commands might need to be sent to a specific device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	/* Bind the cmd to the chosen device for completion handling */
	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	/* Sentinel value: i == cmd_q_count means "no idle queue found",
	 * so the wake-up below is skipped on the backlog/ENOSPC paths.
	 */
	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		/* Queue is full: backlog if allowed, otherwise reject */
		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
			ret = -EBUSY;
			list_add_tail(&cmd->entry, &ccp->backlog);
		} else {
			ret = -ENOSPC;
		}
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue (skip while suspending so queue
		 * threads are not woken during a suspend transition)
		 */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) static void ccp_do_cmd_backlog(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	struct ccp_device *ccp = cmd->ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	cmd->callback(cmd->data, -EINPROGRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	spin_lock_irqsave(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	ccp->cmd_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	list_add_tail(&cmd->entry, &ccp->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	/* Find an idle queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 	for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		if (ccp->cmd_q[i].active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	/* If we found an idle queue, wake it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	if (i < ccp->cmd_q_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 		wake_up_process(ccp->cmd_q[i].kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
/* Pull the next cmd off the device list for this queue thread.
 * Returns NULL when there is no work or the device is suspending.
 * Side effects: updates cmd_q->active/suspended, and schedules one
 * backlogged cmd (if any) to be moved onto the cmd list via work.
 */
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	/* Mark idle first; set active again below only if work is taken */
	cmd_q->active = 0;

	if (ccp->suspending) {
		/* Park this queue and notify the suspend waiter; no work
		 * is dequeued while a suspend is in progress.
		 */
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		/* Take the oldest pending cmd (FIFO order) */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		/* Promote one backlogged cmd; the work item re-adds it to
		 * the cmd list outside this lock (see ccp_do_cmd_backlog).
		 */
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) static void ccp_do_cmd_complete(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	struct ccp_cmd *cmd = tdata->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	cmd->callback(cmd->data, cmd->ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	complete(&tdata->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data (the struct ccp_cmd_queue to service)
 *
 * Main loop of a per-queue kthread: sleep until woken, dequeue a cmd,
 * run it, then fire the completion callback from tasklet context and
 * wait for it to finish before servicing the next cmd.
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	/* Completion callbacks run in tasklet context, sharing tdata */
	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	/* Mark INTERRUPTIBLE before the loop test so a wake-up between
	 * the check and schedule() is not lost.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		/* Re-arm the sleep state before checking for work */
		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		/* Block until the tasklet has run; tdata is reused per cmd */
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)  * ccp_alloc_struct - allocate and initialize the ccp_device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)  * @dev: device struct of the CCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	struct device *dev = sp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	struct ccp_device *ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	if (!ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	ccp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	ccp->sp = sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	ccp->axcache = sp->axcache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	INIT_LIST_HEAD(&ccp->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	INIT_LIST_HEAD(&ccp->backlog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	spin_lock_init(&ccp->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	mutex_init(&ccp->req_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	mutex_init(&ccp->sb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	ccp->sb_count = KSB_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	ccp->sb_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	/* Initialize the wait queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	init_waitqueue_head(&ccp->sb_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	init_waitqueue_head(&ccp->suspend_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	return ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	u32 trng_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	int len = min_t(int, sizeof(trng_value), max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	/* Locking is provided by the caller so we can update device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	 * hwrng-related fields safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	if (!trng_value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		/* Zero is returned if not data is available or if a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 		 * bad-entropy error is present. Assume an error if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		 * we exceed TRNG_RETRIES reads of zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 		if (ccp->hwrng_retries++ > TRNG_RETRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	/* Reset the counter and save the rng value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	ccp->hwrng_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	memcpy(data, &trng_value, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) bool ccp_queues_suspended(struct ccp_device *ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	unsigned int suspended = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	spin_lock_irqsave(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		if (ccp->cmd_q[i].suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 			suspended++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	return ccp->cmd_q_count == suspended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) int ccp_dev_suspend(struct sp_device *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	struct ccp_device *ccp = sp->ccp_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	/* If there's no device there's nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	if (!ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	spin_lock_irqsave(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	ccp->suspending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	/* Wake all the queue kthreads to prepare for suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	for (i = 0; i < ccp->cmd_q_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		wake_up_process(ccp->cmd_q[i].kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	/* Wait for all queue kthreads to say they're done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	while (!ccp_queues_suspended(ccp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		wait_event_interruptible(ccp->suspend_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 					 ccp_queues_suspended(ccp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) int ccp_dev_resume(struct sp_device *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	struct ccp_device *ccp = sp->ccp_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	/* If there's no device there's nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	if (!ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	spin_lock_irqsave(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	ccp->suspending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	/* Wake up all the kthreads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	for (i = 0; i < ccp->cmd_q_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		ccp->cmd_q[i].suspended = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		wake_up_process(ccp->cmd_q[i].kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) int ccp_dev_init(struct sp_device *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	struct device *dev = sp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	struct ccp_device *ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	 * Check how many we have so far, and stop after reaching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	 * that number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	if (atomic_inc_return(&dev_count) > max_devs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		return 0; /* don't fail the load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	ccp = ccp_alloc_struct(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	if (!ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		goto e_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	sp->ccp_data = ccp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	if (!nqueues || (nqueues > MAX_HW_QUEUES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		ccp->max_q_count = MAX_HW_QUEUES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 		ccp->max_q_count = nqueues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	if (!ccp->vdata || !ccp->vdata->version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		dev_err(dev, "missing driver data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		goto e_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	ccp->use_tasklet = sp->use_tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	ccp->io_regs = sp->io_map + ccp->vdata->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	if (ccp->vdata->setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 		ccp->vdata->setup(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	ret = ccp->vdata->perform->init(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 		/* A positive number means that the device cannot be initialized,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		 * but no additional message is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 			goto e_quiet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 		/* An unexpected problem occurred, and should be reported in the log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 		goto e_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	dev_notice(dev, "ccp enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) e_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	dev_notice(dev, "ccp initialization failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) e_quiet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	sp->ccp_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) void ccp_dev_destroy(struct sp_device *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	struct ccp_device *ccp = sp->ccp_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	if (!ccp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	ccp->vdata->perform->destroy(ccp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }