// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

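/*
 * On bare-metal (hypervisor mode) range 0 only carries the multiplexed
 * PSL interrupt, so AFU interrupt ranges start at 1. In a guest, range 0
 * carries AFU interrupts as well and must be included.
 */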
static int afu_irq_range_start(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 1;
	return 0;
}

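/*
 * Record the fault state in the context and defer the actual handling to
 * fault_work, since resolving the fault may need to sleep.
 */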
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}

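/*
 * PSL interrupt handler for PSL9 (POWER9-era) hardware: translation
 * faults are deferred to process context, slice errors are forwarded to
 * the backend, and AFU errors are recorded for the waiting process.
 */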
irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl9_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL9_DSISR_An_TF) {
		pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL9_DSISR_An_PE)
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						       irq_info->errstat);
	if (dsisr & CXL_PSL9_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = true;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL9_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

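/*
 * PSL interrupt handler for PSL8 hardware. Same structure as the PSL9
 * variant, but PSL8 additionally reports segment misses and a finer set
 * of DSISR status bits.
 */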
irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a ref to the task's mm, which we can't do from
		 * irq context without the potential for a deadlock since it
		 * takes the task_lock. An alternate option would be to keep a
		 * reference to the task's mm the entire time it has cxl open,
		 * but to do that we need to solve the issue where we hold a
		 * ref to the mm, but the mm can hold a ref to the fd after an
		 * mmap preventing anything from being cleaned up.
		 */
		pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL_DSISR_An_M)
		pr_devel("CXL interrupt: PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_P)
		pr_devel("CXL interrupt: Storage protection violation\n");
	if (dsisr & CXL_PSL_DSISR_An_A)
		pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
	if (dsisr & CXL_PSL_DSISR_An_S)
		pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
	if (dsisr & CXL_PSL_DSISR_An_K)
		pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

	if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock
		 */
		pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}
	if (dsisr & CXL_PSL_DSISR_An_ST)
		WARN(1, "CXL interrupt: Segment Table PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_UR)
		pr_devel("CXL interrupt: AURP PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_PE)
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						       irq_info->errstat);
	if (dsisr & CXL_PSL_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = true;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

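/*
 * Handler for AFU-sourced interrupts: map the hardware IRQ back to an
 * AFU interrupt number, mark it in the context's bitmap and wake up any
 * process waiting on the context.
 */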
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 0;
	__u16 range;
	int r;

	/*
	 * Look for the interrupt number.
	 * On bare-metal, we know range 0 only contains the PSL
	 * interrupt so we could start counting at range 1 and initialize
	 * afu_irq at 1.
	 * In a guest, range 0 also contains AFU interrupts, so it must
	 * be accounted for. Therefore we initialize afu_irq at 0 to take
	 * the PSL interrupt into account.
	 *
	 * For code-readability, it just seems easier to go over all
	 * the ranges on bare-metal and guest. The end result is the same.
	 */
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
		 afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}

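/*
 * Map a hardware IRQ to a Linux virq and install the given handler.
 * Returns the virq, or 0 on failure. Pair with cxl_unmap_irq() to tear
 * the handler down again.
 */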
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie, const char *name)
{
	unsigned int virq;
	int result;

	/* IRQ Domain? */
	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	if (cxl_ops->setup_irq)
		cxl_ops->setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, name, cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		return 0;
	}

	return virq;
}

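/* Release a handler previously installed with cxl_map_irq(). */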
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
}

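/*
 * Allocate a single hardware IRQ from the adapter and map it to a virq
 * with the given handler. On success, the hardware and virtual interrupt
 * numbers are returned through dest_hwirq and dest_virq.
 */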
int cxl_register_one_irq(struct cxl *adapter,
			 irq_handler_t handler,
			 void *cookie,
			 irq_hw_number_t *dest_hwirq,
			 unsigned int *dest_virq,
			 const char *name)
{
	int hwirq, virq;

	hwirq = cxl_ops->alloc_one_irq(adapter);
	if (hwirq < 0)
		return hwirq;

	virq = cxl_map_irq(adapter, hwirq, handler, cookie, name);
	if (!virq)
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_ops->release_one_irq(adapter, hwirq);
	return -ENOMEM;
}

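/* Free all interrupt names accumulated on the context's irq_names list. */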
void afu_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}

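/*
 * Allocate the hardware IRQ ranges, the pending-interrupt bitmap and the
 * per-interrupt names for a context. On failure, the ranges and names
 * allocated so far are released again.
 */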
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
	int rc, r, i, j = 1;
	struct cxl_irq_name *irq_name;
	int alloc_count;

	/*
	 * In native mode, range 0 is reserved for the multiplexed
	 * PSL interrupt. It has been allocated when the AFU was initialized.
	 *
	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
	 * and is the first interrupt from range 0. It still needs to be
	 * allocated, so bump the count by one.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		alloc_count = count;
	else
		alloc_count = count + 1;

	rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
				       alloc_count);
	if (rc)
		return rc;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Multiplexed PSL Interrupt */
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->irq_count = count;
	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
	if (!ctx->irq_bitmap)
		goto out;

	/*
	 * Allocate names first. If any fail, bail out before allocating
	 * actual hardware IRQs.
	 */
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			irq_name = kmalloc(sizeof(struct cxl_irq_name),
					   GFP_KERNEL);
			if (!irq_name)
				goto out;
			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
						   dev_name(&ctx->afu->dev),
						   ctx->pe, j);
			if (!irq_name->name) {
				kfree(irq_name);
				goto out;
			}
			/* Add to tail so the next loop gets the correct order */
			list_add_tail(&irq_name->list, &ctx->irq_names);
			j++;
		}
	}
	return 0;

out:
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
	afu_irq_name_free(ctx);
	return -ENOMEM;
}

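/*
 * Map every hardware IRQ allocated for the context and wire it up to a
 * handler: the backend's PSL handler for the first interrupt of range 0
 * (guests only, since the loop starts at range 1 on bare-metal),
 * cxl_irq_afu for everything else.
 */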
static void afu_register_hwirqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	struct cxl_irq_name *irq_name;
	int r, i;
	irqreturn_t (*handler)(int irq, void *data);

	/* We've allocated all memory now, so let's do the irq allocations */
	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			if (r == 0 && i == 0)
				/*
				 * The very first interrupt of range 0 is
				 * always the PSL interrupt, but we only
				 * need to connect a handler for guests,
				 * because there's one PSL interrupt per
				 * context.
				 * On bare-metal, the PSL interrupt is
				 * multiplexed and was set up when the AFU
				 * was configured.
				 */
				handler = cxl_ops->psl_interrupt;
			else
				handler = cxl_irq_afu;
			cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
				    irq_name->name);
			irq_name = list_next_entry(irq_name, list);
		}
	}
}

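/* Allocate and then map all AFU interrupts for a context. */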
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	int rc;

	rc = afu_allocate_irqs(ctx, count);
	if (rc)
		return rc;

	afu_register_hwirqs(ctx);
	return 0;
}

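/*
 * Undo afu_register_irqs(): unmap every virq, free the interrupt names
 * and hand the hardware IRQ ranges back to the adapter.
 */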
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, cookie);
		}
	}

	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

	ctx->irq_count = 0;
}

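/*
 * Decode a PSL_SERR_An slice error value and log each error bit that is
 * set.
 */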
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
{
	dev_crit(&afu->dev,
		 "PSL Slice error received. Check AFU for root cause.\n");
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	if (serr & CXL_PSL_SERR_An_afuto)
		dev_crit(&afu->dev, "AFU MMIO Timeout\n");
	if (serr & CXL_PSL_SERR_An_afudis)
		dev_crit(&afu->dev,
			 "MMIO targeted Accelerator that was not enabled\n");
	if (serr & CXL_PSL_SERR_An_afuov)
		dev_crit(&afu->dev, "AFU CTAG Overflow\n");
	if (serr & CXL_PSL_SERR_An_badsrc)
		dev_crit(&afu->dev, "Bad Interrupt Source\n");
	if (serr & CXL_PSL_SERR_An_badctx)
		dev_crit(&afu->dev, "Bad Context Handle\n");
	if (serr & CXL_PSL_SERR_An_llcmdis)
		dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
	if (serr & CXL_PSL_SERR_An_llcmdto)
		dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
	if (serr & CXL_PSL_SERR_An_afupar)
		dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
	if (serr & CXL_PSL_SERR_An_afudup)
		dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
	if (serr & CXL_PSL_SERR_An_AE)
		dev_crit(&afu->dev,
			 "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
}