Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

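The listing below is drivers/misc/cxl/fault.c, the segment- and page-fault handler for IBM CAPI (cxl) accelerators, as carried in this tree.
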
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

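/*
 * Fault handling for cxl contexts.  When an AFU takes a translation
 * fault, the PSL raises an interrupt; the bottom half below
 * (cxl_handle_fault()) runs from a work item with DSISR/DAR captured
 * in the context, repairs the segment table entry or faults the page
 * in, and then either restarts the transaction (CXL_PSL_TFC_An_R) or
 * reports an address error (CXL_PSL_TFC_An_AE) back to the PSL.
 */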
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
		(sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
				       struct copro_slb *slb)
{
	struct cxl_sste *primary, *sste, *ret = NULL;
	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
	unsigned int entry;
	unsigned int hash;

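	/*
	 * Each SSTE is 16 bytes and the table is searched in groups of
	 * eight entries (128 bytes per group), so sst_size >> 7 is the
	 * number of groups.  Hash the ESID's segment index bits (1T or
	 * 256M segments, per the VSID B field) to select the group.
	 */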
	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;
	else /* 256M */
		hash = (slb->esid >> SID_SHIFT) & mask;

	primary = ctx->sstp + (hash << 3);

	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
		if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
			ret = sste;
		if (sste_matches(sste, slb))
			return NULL;
	}
	if (ret)
		return ret;

	/* Nothing free, select an entry to cast out */
	ret = primary + ctx->sst_lru;
	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

	return ret;
}

static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
	/* find_free_sste() hashes to the right group and scans it for us. */
	struct cxl_sste *sste;
	unsigned long flags;

	spin_lock_irqsave(&ctx->sste_lock, flags);
	sste = find_free_sste(ctx, slb);
	if (!sste)
		goto out_unlock;

	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
			sste - ctx->sstp, slb->vsid, slb->esid);
	trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

	sste->vsid_data = cpu_to_be64(slb->vsid);
	sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
			     u64 ea)
{
	struct copro_slb slb = {0,0};
	int rc;

	if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
		cxl_load_segment(ctx, &slb);
	}

	return rc;
}

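/*
 * Tell the PSL the transaction cannot be serviced (Address Error) and
 * record the faulting DAR/DSISR on the context so a waiter on the
 * context's wait queue can pick the fault up.
 */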
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}

static int cxl_handle_segment_miss(struct cxl_context *ctx,
				   struct mm_struct *mm, u64 ea)
{
	int rc;

	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
	trace_cxl_ste_miss(ctx, ea);

	if ((rc = cxl_fault_segment(ctx, mm, ea)))
		cxl_ack_ae(ctx);
	else {
		mb(); /* Order seg table write to TFC MMIO write */
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}

	return IRQ_HANDLED;
}

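/*
 * Resolve a translation fault against @mm: fault the page in via
 * copro_handle_mm_fault(), and on hash MMU systems also preload the
 * hash page table entry, since the usual update_mmu_cache() path is
 * not taken for faults arriving via the PSL.  Returns 0 on success.
 * Non-static: in this tree it is also called from the cxllib interface.
 */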
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
	vm_fault_t flt = 0;
	int result;
	unsigned long access, flags, inv_flags = 0;

	/*
	 * Add the fault handling cpu to task mm cpumask so that we
	 * can do a safe lockless page table walk when inserting the
	 * hash page table entry. This function gets called with a
	 * valid mm for user space addresses, hence the if (mm)
	 * check is sufficient here.
	 */
	if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		/*
		 * We need to make sure we walk the table only after
		 * we update the cpumask. The other side of the barrier
		 * is explained in serialize_against_pte_lookup()
		 */
		smp_mb();
	}
	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
		return result;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash since current->trap
		 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (dsisr & CXL_PSL_DSISR_An_S)
			access |= _PAGE_WRITE;

		if (!mm && (get_region_id(dar) != USER_REGION_ID))
			access |= _PAGE_PRIVILEGED;

		if (dsisr & DSISR_NOHPTE)
			inv_flags |= HPTE_NOHPTE_UPDATE;

		local_irq_save(flags);
		hash_page_mm(mm, dar, access, 0x300, inv_flags);
		local_irq_restore(flags);
	}
	return 0;
}

static void cxl_handle_page_fault(struct cxl_context *ctx,
				  struct mm_struct *mm,
				  u64 dsisr, u64 dar)
{
	trace_cxl_pte_miss(ctx, dsisr, dar);

	if (cxl_handle_mm_fault(mm, dsisr, dar)) {
		cxl_ack_ae(ctx);
	} else {
		pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}
}

/*
 * Returns the mm_struct corresponding to the context ctx, taking an
 * mm_users reference that the caller must drop with mmput().  If
 * mm_users is already 0, the context may be in the process of being
 * closed, so return NULL rather than resurrect the mm.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
	if (ctx->mm == NULL)
		return NULL;

	if (!atomic_inc_not_zero(&ctx->mm->mm_users))
		return NULL;

	return ctx->mm;
}

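/*
 * Classify the fault from the PSL DSISR.  POWER8 PSLs distinguish
 * segment misses (DS) from page faults (DM); on POWER9 there is no
 * segment table, so every translation fault is handled as a page
 * fault here.
 */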
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
	if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
		return true;

	return false;
}

static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
		return true;

	if (cxl_is_power9())
		return true;

	return false;
}

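/*
 * Bottom half for translation fault interrupts, run from the context's
 * fault_work work item.  The faulting DSISR/DAR are read from the
 * context, where the interrupt handler is expected to have stashed
 * them before scheduling this work.
 */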
void cxl_handle_fault(struct work_struct *fault_work)
{
	struct cxl_context *ctx =
		container_of(fault_work, struct cxl_context, fault_work);
	u64 dsisr = ctx->dsisr;
	u64 dar = ctx->dar;
	struct mm_struct *mm = NULL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
			/* Most likely explanation is harmless - a dedicated
			 * process has detached and these were cleared by the
			 * PSL purge, but warn about it just in case
			 */
			dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
			return;
		}
	}

	/* Early return if the context is being / has been detached */
	if (ctx->status == CLOSED) {
		cxl_ack_ae(ctx);
		return;
	}

	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
		"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

	if (!ctx->kernel) {
		mm = get_mem_context(ctx);
		if (mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			cxl_ack_ae(ctx);
			return;
		} else {
			pr_devel("Handling page fault for pe=%d pid=%i\n",
				 ctx->pe, pid_nr(ctx->pid));
		}
	}

	if (cxl_is_segment_miss(ctx, dsisr))
		cxl_handle_segment_miss(ctx, mm, dar);
	else if (cxl_is_page_fault(ctx, dsisr))
		cxl_handle_page_fault(ctx, mm, dsisr, dar);
	else
		WARN(1, "cxl_handle_fault has nothing to handle\n");

	if (mm)
		mmput(mm);
}

static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_one unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	cxl_fault_segment(ctx, mm, ea);

	mmput(mm);
}

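/*
 * Round ea up to the start of the next segment: 2^40 (1TB) segments
 * if SLB_VSID_B_1T is set, 2^28 (256MB) segments otherwise.
 */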
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}

static void cxl_prefault_vma(struct cxl_context *ctx)
{
	u64 ea, last_esid = 0;
	struct copro_slb slb;
	struct vm_area_struct *vma;
	int rc;
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_vm unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		for (ea = vma->vm_start; ea < vma->vm_end;
				ea = next_segment(ea, slb.vsid)) {
			rc = copro_calculate_slb(mm, ea, &slb);
			if (rc)
				continue;

			if (last_esid == slb.esid)
				continue;

			cxl_load_segment(ctx, &slb);
			last_esid = slb.esid;
		}
	}
	mmap_read_unlock(mm);

	mmput(mm);
}

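/*
 * Preload segment table entries at attach time, per the AFU's
 * prefault_mode (configurable via sysfs, at least in this tree): just
 * the segment containing the work element descriptor, every segment
 * of every VMA in the mm, or nothing.
 */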
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_prefault_one(ctx, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx);
		break;
	default:
		break;
	}
}
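
/*
 * For reference, a sketch of how this bottom half is driven.  This is
 * an illustration, not code from this file: the call sites live in the
 * driver's interrupt path (irq.c in mainline), and the exact guards
 * there may differ.  On a translation fault the top half is expected
 * to do roughly:
 *
 *	ctx->dsisr = dsisr;              // capture fault registers
 *	ctx->dar = dar;
 *	schedule_work(&ctx->fault_work); // defer to cxl_handle_fault()
 *	return IRQ_HANDLED;
 *
 * cxl_handle_fault() then re-validates those registers (in HV mode),
 * resolves the segment miss or page fault, and restarts or aborts the
 * AFU transaction.
 */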