Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

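The listing below is the IBM vTPM CRQ driver source (tpm_ibmvtpm) as carried in this kernel tree.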
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2020 IBM Corporation
 *
 * Author: Ashley Lai <ashleydlai@gmail.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 */

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <asm/vio.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/prom.h>

#include "tpm.h"
#include "tpm_ibmvtpm.h"

static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";

static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
	{ "IBM,vtpm", "IBM,vtpm"},
	{ "IBM,vtpm", "IBM,vtpm20"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);

/**
 * ibmvtpm_send_crq_word() - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		pre-constructed first word of tpm crq (second word is reserved)
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
}

/**
 * ibmvtpm_send_crq() - Send a CRQ request
 *
 * @vdev:	vio device struct
 * @valid:	Valid field
 * @msg:	Type field
 * @len:	Length field
 * @data:	Data field
 *
 * The ibmvtpm crq is defined as follows:
 *
 * Byte  |   0   |   1   |   2   |   3   |   4   |   5   |   6   |   7
 * -----------------------------------------------------------------------
 * Word0 | Valid | Type  |     Length    |              Data
 * -----------------------------------------------------------------------
 * Word1 |                Reserved
 * -----------------------------------------------------------------------
 *
 * Which matches the following structure (on a big-endian host):
 *
 * struct ibmvtpm_crq {
 *         u8 valid;
 *         u8 msg;
 *         __be16 len;
 *         __be32 data;
 *         __be64 reserved;
 * } __attribute__((packed, aligned(8)));
 *
 * However, the value is passed in a register, so just compute the numeric value
 * to load into the register, avoiding byte swapping altogether. Endianness only
 * affects memory loads and stores; registers are represented the same internally.
 *
 * Return:
 *	0 (H_SUCCESS) - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev,
		u8 valid, u8 msg, u16 len, u32 data)
{
	u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
		(u64)data;
	return ibmvtpm_send_crq_word(vdev, w1);
}

/**
 * tpm_ibmvtpm_recv - Receive data after send
 *
 * @chip:	tpm chip struct
 * @buf:	buffer to read
 * @count:	size of buffer
 *
 * Return:
 *	Number of bytes read
 */
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	u16 len;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	len = ibmvtpm->res_len;

	if (count < len) {
		dev_err(ibmvtpm->dev,
			"Invalid size in recv: count=%zd, crq_size=%d\n",
			count, len);
		return -EIO;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
	memset(ibmvtpm->rtce_buf, 0, len);
	ibmvtpm->res_len = 0;
	spin_unlock(&ibmvtpm->rtce_lock);
	return len;
}

/**
 * ibmvtpm_crq_send_init - Send a CRQ initialize message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"%s failed rc=%d\n", __func__, rc);

	return rc;
}

/**
 * tpm_ibmvtpm_resume - Resume from suspend
 *
 * @dev:	device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
		return rc;
	}

	rc = vio_enable_interrupts(ibmvtpm->vdev);
	if (rc) {
		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
		return rc;
	}

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		dev_err(dev, "Error send_init rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer contains data to send
 * @count:	size of buffer
 *
 * Return:
 *   0 on success,
 *   -errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	bool retry = true;
	int rc, sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev,
		         "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	spin_lock(&ibmvtpm->rtce_lock);
	ibmvtpm->res_len = 0;
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = 1;

again:
	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
			count, ibmvtpm->rtce_dma_handle);
	if (rc != H_SUCCESS) {
		/*
		 * H_CLOSED can be returned after LPM resume.  Call
		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
		 * ibmvtpm_send_crq() once before failing.
		 */
		if (rc == H_CLOSED && retry) {
			tpm_ibmvtpm_resume(ibmvtpm->dev);
			retry = false;
			goto again;
		}
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		ibmvtpm->tpm_processing_cmd = 0;
	}

	spin_unlock(&ibmvtpm->rtce_lock);
	return 0;
}

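/**
 * tpm_ibmvtpm_cancel - Cancel a command (no-op)
 * @chip:	tpm chip struct
 *
 * Added doc comment: this driver does not implement cancellation of an
 * in-flight request, so this callback is provided only as a stub for the
 * .cancel hook and intentionally does nothing.
 */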
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
{
	return;
}

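/**
 * tpm_ibmvtpm_status - Return the driver status byte
 * @chip:	tpm chip struct
 *
 * Added doc comment: returns the tpm_processing_cmd flag, which is non-zero
 * while a command sent over the CRQ is still being processed and is cleared
 * once the response arrives.
 */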
static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);

	return ibmvtpm->tpm_processing_cmd;
}

/**
 * ibmvtpm_crq_get_rtce_size - Send a CRQ request to get rtce size
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_get_version - Send a CRQ request to get the vtpm version
 *	(note that this is the vtpm version, not the TPM version)
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_get_version failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_crq_send_init_complete - Send a CRQ initialize complete message
 * @ibmvtpm:	vtpm device struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);

	return rc;
}

/**
 * tpm_ibmvtpm_remove - ibm vtpm remove entry point
 * @vdev:	vio device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	tpm_chip_unregister(chip);

	free_irq(vdev->irq, ibmvtpm);

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
			 CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);

	if (ibmvtpm->rtce_buf) {
		dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
				 ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
		kfree(ibmvtpm->rtce_buf);
	}

	kfree(ibmvtpm);
	/* For tpm_ibmvtpm_get_desired_dma */
	dev_set_drvdata(&vdev->dev, NULL);

	return 0;
}

/**
 * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver
 * @vdev:	vio device struct
 *
 * Return:
 *	Number of bytes the driver needs to DMA map.
 */
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm;

	/*
	 * ibmvtpm initializes at probe time, so the data we are
	 * asking for may not be set yet. Estimate that 4K is required
	 * for the TCE-mapped buffer in addition to the CRQ.
	 */
	if (chip)
		ibmvtpm = dev_get_drvdata(&chip->dev);
	else
		return CRQ_RES_BUF_SIZE + PAGE_SIZE;

	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

/**
 * tpm_ibmvtpm_suspend - Suspend
 * @dev:	device struct
 *
 * Return: Always 0.
 */
static int tpm_ibmvtpm_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	int rc = 0;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
	if (rc != H_SUCCESS)
		dev_err(ibmvtpm->dev,
			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);

	return rc;
}

/**
 * ibmvtpm_reset_crq - Reset CRQ
 *
 * @ibmvtpm:	ibm vtpm struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ,
					ibmvtpm->vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	memset(ibmvtpm->crq_queue.crq_addr, 0, CRQ_RES_BUF_SIZE);
	ibmvtpm->crq_queue.index = 0;

	return plpar_hcall_norets(H_REG_CRQ, ibmvtpm->vdev->unit_address,
				  ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}

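/**
 * tpm_ibmvtpm_req_canceled - Check whether a request was canceled
 * @chip:	tpm chip struct
 * @status:	driver status byte (see tpm_ibmvtpm_status())
 *
 * Added doc comment: with this driver a zero status (no command being
 * processed) is treated as a canceled request.
 *
 * Return: true if @status is zero.
 */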
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return (status == 0);
}

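/*
 * Added note: the TPM core polls .status() and treats a command as complete
 * when (status & req_complete_mask) == req_complete_val. With the values
 * below, that means completion is signalled once ibmvtpm_crq_process() has
 * cleared tpm_processing_cmd on receipt of the command response.
 */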
static const struct tpm_class_ops tpm_ibmvtpm = {
	.recv = tpm_ibmvtpm_recv,
	.send = tpm_ibmvtpm_send,
	.cancel = tpm_ibmvtpm_cancel,
	.status = tpm_ibmvtpm_status,
	.req_complete_mask = 1,
	.req_complete_val = 0,
	.req_canceled = tpm_ibmvtpm_req_canceled,
};

static const struct dev_pm_ops tpm_ibmvtpm_pm_ops = {
	.suspend = tpm_ibmvtpm_suspend,
	.resume = tpm_ibmvtpm_resume,
};

/**
 * ibmvtpm_crq_get_next - Get next responded crq
 *
 * @ibmvtpm:	vtpm device struct
 *
 * Return: vtpm crq pointer or NULL.
 */
static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
{
	struct ibmvtpm_crq_queue *crq_q = &ibmvtpm->crq_queue;
	struct ibmvtpm_crq *crq = &crq_q->crq_addr[crq_q->index];

	if (crq->valid & VTPM_MSG_RES) {
		if (++crq_q->index == crq_q->num_entry)
			crq_q->index = 0;
		smp_rmb();
	} else
		crq = NULL;
	return crq;
}

/**
 * ibmvtpm_crq_process - Process responded crq
 *
 * @crq:	crq to be processed
 * @ibmvtpm:	vtpm device struct
 *
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			ibmvtpm->tpm_processing_cmd = 0;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}

/**
 * ibmvtpm_interrupt -	Interrupt handler
 *
 * @irq:		irq number to handle
 * @vtpm_instance:	vtpm that received interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
{
	struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
	struct ibmvtpm_crq *crq;

	/* while loop is needed for initial setup (get version and
	 * get rtce_size). There should be only one tpm request at any
	 * given time.
	 */
	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
		ibmvtpm_crq_process(crq, ibmvtpm);
		wake_up_interruptible(&ibmvtpm->crq_queue.wq);
		crq->valid = 0;
		smp_wmb();
	}

	return IRQ_HANDLED;
}

/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
				   const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	init_waitqueue_head(&crq_q->wq);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
				ibmvtpm->rtce_buf != NULL,
				HZ)) {
		dev_err(dev, "CRQ response timed out\n");
		goto init_irq_cleanup;
	}


	if (!strcmp(id->compat, "IBM,vtpm20"))
		chip->flags |= TPM_CHIP_FLAG_TPM2;

	rc = tpm_get_timeouts(chip);
	if (rc)
		goto init_irq_cleanup;

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		rc = tpm2_get_cc_attrs_tbl(chip);
		if (rc)
			goto init_irq_cleanup;
	}

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}

static struct vio_driver ibmvtpm_driver = {
	.id_table	 = tpm_ibmvtpm_device_table,
	.probe		 = tpm_ibmvtpm_probe,
	.remove		 = tpm_ibmvtpm_remove,
	.get_desired_dma = tpm_ibmvtpm_get_desired_dma,
	.name		 = tpm_ibmvtpm_driver_name,
	.pm		 = &tpm_ibmvtpm_pm_ops,
};

/**
 * ibmvtpm_module_init - Initialize ibm vtpm module.
 *
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
static int __init ibmvtpm_module_init(void)
{
	return vio_register_driver(&ibmvtpm_driver);
}

/**
 * ibmvtpm_module_exit - Tear down ibm vtpm module.
 */
static void __exit ibmvtpm_module_exit(void)
{
	vio_unregister_driver(&ibmvtpm_driver);
}

module_init(ibmvtpm_module_init);
module_exit(ibmvtpm_module_exit);

MODULE_AUTHOR("adlai@us.ibm.com");
MODULE_DESCRIPTION("IBM vTPM Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");