^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright 2014 IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/sched/clock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/synch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/switch_to.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <misc/cxl-base.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "cxl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) u64 result, u64 mask, bool enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) u64 AFU_Cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) spin_lock(&afu->afu_cntl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) pr_devel("AFU command starting: %llx\n", command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) trace_cxl_afu_ctrl(afu, command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) while ((AFU_Cntl & mask) != result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) if (time_after_eq(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) if (!cxl_ops->link_ok(afu->adapter, afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) afu->enabled = enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) pr_devel_ratelimited("AFU control... (0x%016llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) AFU_Cntl | command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Workaround for a bug in the XSL used in the Mellanox CX4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * that fails to clear the RA bit after an AFU reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * preventing subsequent AFU resets from working.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) pr_devel("AFU command complete: %llx\n", command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) afu->enabled = enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) trace_cxl_afu_ctrl_done(afu, command, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) spin_unlock(&afu->afu_cntl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) static int afu_enable(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) pr_devel("AFU enable request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) CXL_AFU_Cntl_An_ES_Enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) CXL_AFU_Cntl_An_ES_MASK, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) int cxl_afu_disable(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) pr_devel("AFU disable request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) CXL_AFU_Cntl_An_ES_Disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) CXL_AFU_Cntl_An_ES_MASK, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /* This will disable as well as reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static int native_afu_reset(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) u64 serr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) pr_devel("AFU reset request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * Re-enable any masked interrupts when the AFU is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * activated to avoid side effects after attaching a process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * in dedicated mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) if (afu->current_mode == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) static int native_afu_check_and_enable(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (!cxl_ops->link_ok(afu->adapter, afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) WARN(1, "Refusing to enable afu while link down!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) if (afu->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) return afu_enable(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) int cxl_psl_purge(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) u64 dsisr, dar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) u64 start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) u64 trans_fault = 0x0ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) pr_devel("PSL purge request\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) if (cxl_is_power8())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) trans_fault = CXL_PSL_DSISR_TRANS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (cxl_is_power9())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) trans_fault = CXL_PSL9_DSISR_An_TF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) if (!cxl_ops->link_ok(afu->adapter, afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) WARN(1, "psl_purge request while AFU not disabled!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) cxl_afu_disable(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) start = local_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) == CXL_PSL_SCNTL_An_Ps_Pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) if (time_after_eq(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) if (!cxl_ops->link_ok(afu->adapter, afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) PSL_CNTL, dsisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) if (dsisr & trans_fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) dsisr, dar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) } else if (dsisr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) dsisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) end = local_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) pr_devel("PSL purged in %lld ns\n", end - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) static int spa_max_procs(int spa_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * From the CAIA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * Most of that junk is really just an overly-complicated way of saying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * the last 256 bytes are __aligned(128), so it's really:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * Ignore the alignment (which is safe in this case as long as we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * careful with our rounding) and solve for n:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) return ((spa_size / 8) - 96) / 17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) unsigned spa_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /* Work out how many pages to allocate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) afu->native->spa_order = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) afu->native->spa_order++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (spa_size > 0x100000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) afu->native->spa_max_procs, afu->native->spa_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (mode != CXL_MODE_DEDICATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) afu->num_procs = afu->native->spa_max_procs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) afu->native->spa_size = spa_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) } while (afu->native->spa_max_procs < afu->num_procs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (!(afu->native->spa = (struct cxl_process_element *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) static void attach_spa(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) u64 spap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) ((afu->native->spa_max_procs + 3) * 128));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) spap |= CXL_PSL_SPAP_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) afu->native->spa, afu->native->spa_max_procs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) afu->native->sw_command_status, spap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) static inline void detach_spa(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) void cxl_release_spa(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (afu->native->spa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) free_pages((unsigned long) afu->native->spa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) afu->native->spa_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) afu->native->spa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * Invalidation of all ERAT entries is no longer required by CAIA2. Use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * only for debug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) int cxl_invalidate_all_psl9(struct cxl *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) u64 ierat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) pr_devel("CXL adapter - invalidation of all ERAT entries\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /* Invalidates all ERAT entries for Radix or HPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) ierat = CXL_XSL9_IERAT_IALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) if (radix_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) ierat |= CXL_XSL9_IERAT_INVR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) if (time_after_eq(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) dev_warn(&adapter->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) if (!cxl_ops->link_ok(adapter, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) int cxl_invalidate_all_psl8(struct cxl *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) pr_devel("CXL adapter wide TLBIA & SLBIA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (time_after_eq(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) if (!cxl_ops->link_ok(adapter, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (time_after_eq(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) if (!cxl_ops->link_ok(adapter, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) int cxl_data_cache_flush(struct cxl *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * Do a datacache flush only if datacache is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * In case of PSL9D datacache absent hence flush operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * would timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (adapter->native->no_data_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) pr_devel("No PSL data cache. Ignoring cache flush req.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) pr_devel("Flushing data cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) reg = cxl_p1_read(adapter, CXL_PSL_Control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) reg |= CXL_PSL_Control_Fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) cxl_p1_write(adapter, CXL_PSL_Control, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) reg = cxl_p1_read(adapter, CXL_PSL_Control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) if (time_after_eq(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (!cxl_ops->link_ok(adapter, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) reg = cxl_p1_read(adapter, CXL_PSL_Control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) reg &= ~CXL_PSL_Control_Fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) cxl_p1_write(adapter, CXL_PSL_Control, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) /* 1. Disable SSTP by writing 0 to SSTP1[V] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) cxl_p2n_write(afu, CXL_SSTP1_An, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) /* 2. Invalidate all SLB entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if ((rc = cxl_afu_slbia(afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) /* 3. Set SSTP0_An */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) /* 4. Set SSTP1_An */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /* Using per slice version may improve performance here. (ie. SLBIA_An) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) static void slb_invalid(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) struct cxl *adapter = ctx->afu->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) u64 slbia;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) cxl_p1_write(adapter, CXL_PSL_LBISEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) be32_to_cpu(ctx->elem->lpid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) if (!cxl_ops->link_ok(adapter, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (!(slbia & CXL_TLB_SLB_P))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) static int do_process_element_cmd(struct cxl_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) u64 cmd, u64 pe_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) u64 state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) trace_cxl_llcmd(ctx, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) WARN_ON(!ctx->afu->enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) ctx->elem->software_state = cpu_to_be32(pe_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (time_after_eq(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) state = be64_to_cpup(ctx->afu->native->sw_command_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (state == ~0ULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) pr_err("cxl: Error adding process element to AFU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) (cmd | (cmd >> 16) | ctx->pe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * The command won't finish in the PSL if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * outstanding DSIs. Hence we need to yield here in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * case there are outstanding DSIs that we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * service. Tuning possiblity: we could wait for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * while before sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) trace_cxl_llcmd_done(ctx, cmd, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) static int add_process_element(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) mutex_lock(&ctx->afu->native->spa_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) ctx->pe_inserted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) mutex_unlock(&ctx->afu->native->spa_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static int terminate_process_element(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /* fast path terminate if it's already invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) mutex_lock(&ctx->afu->native->spa_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) /* We could be asked to terminate when the hw is down. That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * should always succeed: it's not running if the hw has gone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * away and is being reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) ctx->elem->software_state = 0; /* Remove Valid bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) mutex_unlock(&ctx->afu->native->spa_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static int remove_process_element(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) mutex_lock(&ctx->afu->native->spa_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /* We could be asked to remove when the hw is down. Again, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * the hw is down, the PE is gone, so we succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) ctx->pe_inserted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (cxl_is_power8())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) slb_invalid(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) mutex_unlock(&ctx->afu->native->spa_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) void cxl_assign_psn_space(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (!ctx->afu->pp_size || ctx->master) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) ctx->psn_phys = ctx->afu->psn_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) ctx->psn_size = ctx->afu->adapter->ps_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) ctx->psn_phys = ctx->afu->psn_phys +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) ctx->psn_size = ctx->afu->pp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) static int activate_afu_directed(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) dev_info(&afu->dev, "Activating AFU directed mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) afu->num_procs = afu->max_procs_virtualised;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (afu->native->spa == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) attach_spa(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if (cxl_is_power8())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) afu->current_mode = CXL_MODE_DIRECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) if ((rc = cxl_chardev_m_afu_add(afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if ((rc = cxl_sysfs_afu_m_add(afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) if ((rc = cxl_chardev_s_afu_add(afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) cxl_sysfs_afu_m_remove(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) cxl_chardev_afu_remove(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) #ifdef CONFIG_CPU_LITTLE_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) #define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) #define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) u64 sr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) set_endian(sr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (master)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) sr |= CXL_PSL_SR_An_MP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) if (mfspr(SPRN_LPCR) & LPCR_TC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) sr |= CXL_PSL_SR_An_TC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (kernel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (!real_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) sr |= CXL_PSL_SR_An_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (radix_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) sr |= CXL_PSL_SR_An_HV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) sr &= ~(CXL_PSL_SR_An_HV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (!test_tsk_thread_flag(current, TIF_32BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) sr |= CXL_PSL_SR_An_SF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (p9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (radix_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) sr |= CXL_PSL_SR_An_XLAT_ror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) sr |= CXL_PSL_SR_An_XLAT_hpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) return sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static u64 calculate_sr(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return cxl_calculate_sr(ctx->master, ctx->kernel, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) cxl_is_power9());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) static void update_ivtes_directed(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) bool need_update = (ctx->status == STARTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (need_update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) WARN_ON(terminate_process_element(ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) WARN_ON(remove_process_element(ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) for (r = 0; r < CXL_IRQ_RANGES; r++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * Theoretically we could use the update llcmd, instead of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * terminate/remove/add (or if an atomic update was required we could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * do a suspend/update/resume), however it seems there might be issues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * with the update llcmd on some cards (including those using an XSL on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * an ASIC) so for now it's safest to go with the commands that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * known to work. In the future if we come across a situation where the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * card may be performing transactions using the same PE while we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * doing this update we might need to revisit this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (need_update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) WARN_ON(add_process_element(ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) u32 pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) cxl_assign_psn_space(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) ctx->elem->ctxtime = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) ctx->elem->haurp = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if (ctx->kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) pid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (ctx->mm == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) __func__, ctx->pe, pid_nr(ctx->pid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) pid = ctx->mm->context.id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /* Assign a unique TIDR (thread id) for the current thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (!(ctx->tidr) && (ctx->assign_tidr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) rc = set_thread_tidr(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) ctx->tidr = current->thread.tidr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) ctx->elem->common.pid = cpu_to_be32(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) ctx->elem->common.csrp = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) cxl_prefault(ctx, wed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * Ensure we have the multiplexed PSL interrupt set up to take faults
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * for kernel contexts that may not have allocated any AFU IRQs at all:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (ctx->irqs.range[0] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) ctx->irqs.range[0] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) ctx->elem->common.amr = cpu_to_be64(amr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) ctx->elem->common.wed = cpu_to_be64(wed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /* fill the process element entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) result = process_element_entry_psl9(ctx, wed, amr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) update_ivtes_directed(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* first guy needs to enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) result = cxl_ops->afu_check_and_enable(ctx->afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return add_process_element(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) u32 pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) cxl_assign_psn_space(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ctx->elem->ctxtime = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ctx->elem->haurp = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) pid = current->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (ctx->kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) pid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ctx->elem->common.tid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ctx->elem->common.pid = cpu_to_be32(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ctx->elem->common.csrp = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) ctx->elem->common.u.psl8.aurp1 = 0; /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) cxl_prefault(ctx, wed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * Ensure we have the multiplexed PSL interrupt set up to take faults
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * for kernel contexts that may not have allocated any AFU IRQs at all:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (ctx->irqs.range[0] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ctx->irqs.range[0] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) update_ivtes_directed(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) ctx->elem->common.amr = cpu_to_be64(amr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ctx->elem->common.wed = cpu_to_be64(wed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /* first guy needs to enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return add_process_element(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static int deactivate_afu_directed(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) dev_info(&afu->dev, "Deactivating AFU directed mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) afu->current_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) afu->num_procs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) cxl_sysfs_afu_m_remove(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) cxl_chardev_afu_remove(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * The CAIA section 2.2.1 indicates that the procedure for starting and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * stopping an AFU in AFU directed mode is AFU specific, which is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * ideal since this code is generic and with one exception has no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * knowledge of the AFU. This is in contrast to the procedure for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * disabling a dedicated process AFU, which is documented to just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * require a reset. The architecture does indicate that both an AFU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * reset and an AFU disable should result in the AFU being disabled and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * we do both followed by a PSL purge for safety.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * Notably we used to have some issues with the disable sequence on PSL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * cards, which is why we ended up using this heavy weight procedure in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * the first place, however a bug was discovered that had rendered the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * disable operation ineffective, so it is conceivable that was the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * sole explanation for those difficulties. Careful regression testing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * is recommended if anyone attempts to remove or reorder these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * The XSL on the Mellanox CX4 behaves a little differently from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * PSL based cards and will time out an AFU reset if the AFU is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * enabled. That card is special in that we do have a means to identify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * it from this code, so in that case we skip the reset and just use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * disable/purge to avoid the timeout and corresponding noise in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * kernel log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (afu->adapter->native->sl_ops->needs_reset_before_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) cxl_ops->afu_reset(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) cxl_afu_disable(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) cxl_psl_purge(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) dev_info(&afu->dev, "Activating dedicated process mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * XSL and AFU are programmed to work with a single context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * The context information should be configured in the SPA area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * index 0 (so PSL_SPAP must be configured before enabling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * AFU).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) afu->num_procs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (afu->native->spa == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) attach_spa(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) afu->current_mode = CXL_MODE_DEDICATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return cxl_chardev_d_afu_add(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dev_info(&afu->dev, "Activating dedicated process mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) afu->current_mode = CXL_MODE_DEDICATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) afu->num_procs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return cxl_chardev_d_afu_add(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) for (r = 0; r < CXL_IRQ_RANGES; r++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct cxl_afu *afu = ctx->afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ((u64)ctx->irqs.offset[3] & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ((u64)ctx->irqs.range[3] & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct cxl_afu *afu = ctx->afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* fill the process element entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) result = process_element_entry_psl9(ctx, wed, amr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * Ideally we should do a wmb() here to make sure the changes to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * PE are visible to the card before we call afu_enable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * On ppc64 though all mmios are preceded by a 'sync' instruction hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * we dont dont need one here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) result = cxl_ops->afu_reset(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return afu_enable(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct cxl_afu *afu = ctx->afu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) u64 pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) pid = (u64)current->pid << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (ctx->kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) pid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) cxl_prefault(ctx, wed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* master only context for dedicated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) cxl_assign_psn_space(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if ((rc = cxl_ops->afu_reset(afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return afu_enable(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static int deactivate_dedicated_process(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dev_info(&afu->dev, "Deactivating dedicated process mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) afu->current_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) afu->num_procs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) cxl_chardev_afu_remove(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (mode == CXL_MODE_DIRECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return deactivate_afu_directed(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (mode == CXL_MODE_DEDICATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return deactivate_dedicated_process(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (!mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (!(mode & afu->modes_supported))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (!cxl_ops->link_ok(afu->adapter, afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) WARN(1, "Device link is down, refusing to activate!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (mode == CXL_MODE_DIRECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return activate_afu_directed(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if ((mode == CXL_MODE_DEDICATED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) (afu->adapter->native->sl_ops->activate_dedicated_process))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return afu->adapter->native->sl_ops->activate_dedicated_process(afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static int native_attach_process(struct cxl_context *ctx, bool kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) u64 wed, u64 amr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) WARN(1, "Device link is down, refusing to attach process!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ctx->kernel = kernel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static inline int detach_process_native_dedicated(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * stop the AFU in dedicated mode (we therefore do not make that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * optional like we do in the afu directed path). It does not indicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * that we need to do an explicit disable (which should occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * implicitly as part of the reset) or purge, but we do these as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * to be on the safe side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * Notably we used to have some issues with the disable sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * (before the sequence was spelled out in the architecture) which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * why we were so heavy weight in the first place, however a bug was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * discovered that had rendered the disable operation ineffective, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * it is conceivable that was the sole explanation for those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * difficulties. Point is, we should be careful and do some regression
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * testing if we ever attempt to remove any part of this procedure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) cxl_ops->afu_reset(ctx->afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) cxl_afu_disable(ctx->afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) cxl_psl_purge(ctx->afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void native_update_ivtes(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return update_ivtes_directed(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) WARN(1, "native_update_ivtes: Bad mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (!ctx->pe_inserted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (terminate_process_element(ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (remove_process_element(ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int native_detach_process(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) trace_cxl_detach(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return detach_process_native_dedicated(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return detach_process_native_afu_directed(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* If the adapter has gone away, we can't get any meaningful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (!cxl_ops->link_ok(afu->adapter, afu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (cxl_is_power8())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) info->proc_handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) u64 fir1, serr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) cxl_afu_decode_psl_serr(ctx->afu, serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) u64 fir1, fir2, fir_slice, serr, afu_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) cxl_afu_decode_psl_serr(ctx->afu, serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) u64 dsisr, u64 errstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return cxl_ops->ack_irq(ctx, 0, errstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (cxl_is_translation_fault(afu, irq_info->dsisr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static irqreturn_t native_irq_multiplexed(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct cxl_afu *afu = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct cxl_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct cxl_irq_info irq_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int ph, ret = IRQ_HANDLED, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* check if eeh kicked in while the interrupt was in flight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (unlikely(phreg == ~0ULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) dev_warn(&afu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) "Ignoring slice interrupt(%d) due to fenced card",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* Mask the pe-handle from register value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ph = phreg & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if ((res = native_get_irq_info(afu, &irq_info))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (afu->adapter->native->sl_ops->fail_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ctx = idr_find(&afu->contexts_idr, ph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (afu->adapter->native->sl_ops->handle_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) " %016llx\n(Possible AFU HW issue - was a term/remove acked"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) " with outstanding transactions?)\n", ph, irq_info.dsisr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) irq_info.dar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (afu->adapter->native->sl_ops->fail_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static void native_irq_wait(struct cxl_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) u64 dsisr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) int timeout = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) int ph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * Wait until no further interrupts are presented by the PSL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * for this context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) while (timeout--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (ph != ctx->pe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (cxl_is_power8() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (cxl_is_power9() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * We are waiting for the workqueue to process our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * irq, so need to let that run here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) " DSISR %016llx!\n", ph, dsisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static irqreturn_t native_slice_irq_err(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct cxl_afu *afu = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) u64 errstat, serr, afu_error, dsisr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) u64 fir_slice, afu_debug, irq_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * slice err interrupt is only used with full PSL (no XSL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) cxl_afu_decode_psl_serr(afu, serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (cxl_is_power8()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* mask off the IRQ so it won't retrigger until the AFU is reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) serr |= irq_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) u64 fir1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) u64 fir1, fir2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) dev_crit(&adapter->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) fir1, fir2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static irqreturn_t native_irq_err(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct cxl *adapter = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) u64 err_ivte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) WARN(1, "CXL ERROR interrupt %i\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (adapter->native->sl_ops->debugfs_stop_trace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) adapter->native->sl_ops->debugfs_stop_trace(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (adapter->native->sl_ops->err_irq_dump_registers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) adapter->native->sl_ops->err_irq_dump_registers(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int cxl_native_register_psl_err_irq(struct cxl *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dev_name(&adapter->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!adapter->irq_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) &adapter->native->err_hwirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) &adapter->native->err_virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) adapter->irq_name))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) kfree(adapter->irq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) adapter->irq_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) void cxl_native_release_psl_err_irq(struct cxl *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (adapter->native->err_virq == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) adapter->native->err_virq !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) irq_find_mapping(NULL, adapter->native->err_hwirq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) cxl_unmap_irq(adapter->native->err_virq, adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) kfree(adapter->irq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) adapter->native->err_virq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
/*
 * Allocate and map the per-AFU slice error (SERR) interrupt, then program
 * CXL_PSL_SERR_An with the new hwirq number and the generation-specific
 * error mask bits.
 *
 * Returns 0 on success, -ENOMEM if the irq name cannot be allocated, or
 * the error from cxl_register_one_irq().
 */
int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (cxl_is_power8())
		/* Keep the middle mask bits, replace the low 16 with the hwirq */
		serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	if (cxl_is_power9()) {
		/*
		 * By default, all errors are masked. So don't set all masks.
		 * Slice errors will be transferred.
		 */
		serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
	}
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) void cxl_native_release_serr_irq(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (afu->serr_virq == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) cxl_unmap_irq(afu->serr_virq, afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) kfree(afu->err_irq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) afu->serr_virq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int cxl_native_register_psl_irq(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) dev_name(&afu->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (!afu->psl_irq_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) afu->psl_irq_name))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) kfree(afu->psl_irq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) afu->psl_irq_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) void cxl_native_release_psl_irq(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (afu->native->psl_virq == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) afu->native->psl_virq !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) irq_find_mapping(NULL, afu->native->psl_hwirq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) cxl_unmap_irq(afu->native->psl_virq, afu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) kfree(afu->psl_irq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) afu->native->psl_virq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
/*
 * Recover the AFU from a PSL error: clear the PE bit in PSL_DSISR_An,
 * then write the caller-supplied status bits back to PSL_ErrStat_An
 * (write-one-to-clear) to acknowledge them.  Register write order is
 * significant — DSISR must be cleared before the error status.
 */
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
/*
 * Acknowledge a PSL interrupt for a context: write the Translation Fault
 * Control bits (if any), then run PSL error recovery for the bits in
 * @psl_reset_mask (if any).  Always returns 0.
 */
static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) int cxl_check_error(struct cxl_afu *afu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
/*
 * Native (bare-metal) mode exposes every sysfs attribute, so this is
 * unconditionally true regardless of attribute name or type.
 */
static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (unlikely(off >= afu->crs_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) (cr * afu->crs_len) + off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (unlikely(off >= afu->crs_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) (cr * afu->crs_len) + off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) u64 aligned_off = off & ~0x3L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) *out = (val >> ((off & 0x3) * 8)) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) u64 aligned_off = off & ~0x3L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) *out = (val >> ((off & 0x3) * 8)) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (unlikely(off >= afu->crs_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) (cr * afu->crs_len) + off, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) u64 aligned_off = off & ~0x3L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) u32 val32, mask, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) shift = (off & 0x3) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) WARN_ON(shift == 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) mask = 0xffff << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) val32 = (val32 & ~mask) | (in << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) u64 aligned_off = off & ~0x3L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) u32 val32, mask, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) shift = (off & 0x3) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) mask = 0xff << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) val32 = (val32 & ~mask) | (in << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
/*
 * Backend ops for native (bare-metal / PowerNV) mode: PCI-level plumbing
 * is delegated to the cxl_pci_* helpers, everything context/AFU-related
 * to the native_* implementations in this file.
 */
const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL, /* native mode uses the multiplexed PSL irq instead */
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};