// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sync_core.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context, but
 * a SLEEP may be required!
 *
 * Async Overview:
 *
 *	Each blade has one "kernel context" that owns GRU kernel resources
 *	located on the blade. Kernel drivers use GRU resources in this context
 *	for sending messages, zeroing memory, etc.
 *
 *	The kernel context is dynamically loaded on demand. If it is not in
 *	use by the kernel, the kernel context can be unloaded & given to a user.
 *	The kernel context will be reloaded when needed. This may require that
 *	a context be stolen from a user.
 *		NOTE: frequent unloading/reloading of the kernel context is
 *		expensive. We are depending on batch schedulers, cpusets, sane
 *		drivers or some other mechanism to prevent the need for frequent
 *		stealing/reloading.
 *
 *	The kernel context consists of two parts:
 *	  - 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *	    Each cpu has its own private resources & does not share them
 *	    with other cpus. These resources are used serially, i.e.,
 *	    locked, used & unlocked on each call to a function in
 *	    grukservices.
 *		(Now that we have dynamic loading of kernel contexts, I
 *		 may rethink this & allow sharing between cpus....)
 *
 *	  - Additional resources can be reserved long term & used directly
 *	    by UV drivers located in the kernel. Drivers using these GRU
 *	    resources can use asynchronous GRU instructions that send
 *	    interrupts on completion.
 *		- these resources must be explicitly locked/unlocked
 *		- locked resources prevent (obviously) the kernel
 *		  context from being unloaded.
 *		- drivers using these resources directly issue their own
 *		  GRU instructions and must wait for/check completion.
 *
 *		When these resources are reserved, the caller can optionally
 *		associate a wait_queue with the resources and use asynchronous
 *		GRU instructions. When an async GRU instruction completes, the
 *		driver will do a wakeup on the event.
 *
 */
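
/*
 * Illustrative sketch of the async interface described above. This is not
 * part of the driver logic and is never called; the blade id, resource
 * counts and the completion handling are assumptions chosen only to show
 * the reserve/lock/wait/unlock/release sequence.
 */
static void __maybe_unused gru_async_usage_sketch(void)
{
	struct completion cmp;
	unsigned long han;
	void *cb, *dsr;

	init_completion(&cmp);

	/* Reserve 2 CBRs & 128 DSR bytes on blade 0 (assumed sizes) */
	han = gru_reserve_async_resources(0, 2, 128, &cmp);
	if (!han)
		return;		/* async resources already reserved on blade */

	/* Lock the kernel context & locate the reserved CBR/DSR */
	gru_lock_async_resource(han, &cb, &dsr);

	/* ... issue asynchronous GRU instructions using cb & dsr here ... */

	gru_wait_async_cbr(han);	/* driver's irq handler completes cmp */
	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
}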


#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]
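/* e.g., blade 0 <-> handle 1; handle 0 is reserved to mean "no reservation" */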

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
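/* e.g., HSTATUS(mq_gpa, 1) is the GPA of hstatus[1], the 2nd-half lock word */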

/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

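	/*
	 * Upgrade the caller's READ lock to a WRITE lock: rwsems cannot be
	 * upgraded in place, so drop & reacquire. State may change while
	 * the sema is not held, hence the rechecks below.
	 */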
	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts) {
		do {
			bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
			if (!IS_ERR(bs->bs_kgts))
				break;
			msleep(1);
		} while (true);
		bs->bs_kgts->ts_user_blade_id = blade_id;
	}
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts)) {
			msleep(1);
			gru_steal_context(kgts);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}

/*
 * Free all kernel contexts that are not currently in use.
 * Returns 0 if all were freed, else the number of contexts still in use.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int bid, ret = 0;

	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		bs = gru_base[bid];
		if (!bs)
			continue;

		/* Ignore busy contexts. Don't want to block here. */
		if (down_write_trylock(&bs->bs_kgts_sema)) {
			kgts = bs->bs_kgts;
			if (kgts && kgts->ts_gru)
				gru_unload_context(kgts, 0);
			bs->bs_kgts = NULL;
			up_write(&bs->bs_kgts_sema);
			kfree(kgts);
		} else {
			ret++;
		}
	}
	return ret;
}

/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;
	int bid;

	STAT(lock_kernel_context);
again:
	bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
	bs = gru_base[bid];

	/* Handle the case where migration occurred while waiting for the sema */
	down_read(&bs->bs_kgts_sema);
	if (blade_id < 0 && bid != uv_numa_blade_id()) {
		up_read(&bs->bs_kgts_sema);
		goto again;
	}
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, bid);
	return bs;
}

/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}

/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(-1);
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}

/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}
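
/*
 * Illustrative sketch (not called anywhere) of the serial reserve/use/free
 * pattern that every kservice in this file follows. The gru_vset()
 * arguments here are assumptions chosen only for illustration.
 */
static int __maybe_unused gru_sync_usage_sketch(unsigned long gpa)
{
	void *cb, *dsr;
	int ret;

	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_vset(cb, gpa, 0, XTYPE_CL, 1, 1, IMA);	/* zero 1 cacheline */
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}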

/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 *	input:
 *		blade_id  - blade on which resources should be reserved
 *		cbrs	  - number of CBRs
 *		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}

/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}

/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
	mb();
}

/*
 * Lock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}

/*
 * Unlock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}

/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *kgts = NULL;
	unsigned long off;
	int cbrnum, bid;

	/*
	 * Locate kgts for cb. This algorithm is SLOW but
	 * this function is rarely called (i.e., almost never).
	 * Performance does not matter.
	 */
	for_each_possible_blade(bid) {
		if (!gru_base[bid])
			break;
		kgts = gru_base[bid]->bs_kgts;
		if (!kgts || !kgts->ts_gru)
			continue;
		off = cb - kgts->ts_gru->gs_gru_base_vaddr;
		if (off < GRU_SIZE)
			break;
		kgts = NULL;
	}
	BUG_ON(!kgts);
	cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
	cbe = get_cbe(GRUBASE(cb), cbrnum);
	gru_flush_cache(cbe);	/* CBE not coherent */
	sync_core();
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	gru_flush_cache(cbe);
	return 0;
}

static char *gru_get_cb_exception_detail_str(int ret, void *cb,
					     char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;
		if (gru_get_cb_message_queue_substatus(cb))
			return CBS_EXCEPTION;
		gru_get_cb_exception_detail(cb, &excdet);
		if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
				(excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
			break;
		if (retry-- == 0)
			break;
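		/* Retry: rewriting icmd makes the GRU re-execute the CB */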
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}

int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}

static void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}


/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */


/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}

/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

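	/* The first 2 cachelines hold the queue header, not messages */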
	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = nasid >> 1;
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
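
/*
 * Illustrative sketch (not called anywhere): creating a queue that is
 * polled rather than interrupt driven. The size & allocator are
 * assumptions; a power-of-2 kmalloc size is assumed to give sufficient
 * cacheline alignment, and nasid/vector/apicid of 0 is assumed to mean
 * "no interrupt delivery" (the send path skips delivery when
 * interrupt_vector is 0).
 */
static int __maybe_unused gru_create_mq_sketch(struct gru_message_queue_desc *mqd)
{
	unsigned int bytes = 8 * GRU_CACHE_LINE_BYTES;
	void *p = kmalloc(bytes, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	return gru_create_message_queue(mqd, p, bytes, 0, 0, 0);
}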

/*
 * Send a NOOP message to a message queue
 *	Returns:
 *		 0 - if queue is full after the send. This is the normal case
 *		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
			STAT(mesq_noop_page_overflow);
			fallthrough;
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}

/*
 * Handle a full message queue (gru_mesq returned QLIMIT_REACHED).
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

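	/*
	 * The message area is managed as two halves: when one half fills,
	 * the sender flips the queue head to the other half while the
	 * receiver drains the full one. hstatus[half] serializes the flip.
	 */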
	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOOP if queue not full. */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If the queue head was not successfully swapped, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
							IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before resending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
			void *mesg, int lines)
{
	unsigned long m;
	int ret, loops = 200;	/* experimentally determined */

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;

	if (!mqd->interrupt_vector)
		return MQE_OK;

	/*
	 * Send a noop message in order to deliver a cross-partition interrupt
	 * to the SSI that contains the target message queue. Normally, the
	 * interrupt is automatically delivered by hardware following mesq
	 * operations, but some error conditions require explicit delivery.
	 * The noop message will trigger delivery. Otherwise partition failures
	 * could cause unrecovered errors.
	 */
	do {
		ret = send_noop_message(cb, mqd, mesg);
	} while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));

	if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
		/*
		 * Don't indicate to the app to resend the message, as it's
		 * already been successfully sent. We simply send an OK
		 * (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
		 * assuming that the other side is receiving enough
		 * interrupts to get this message processed anyway.
		 */
		ret = MQE_OK;
	}
	return ret;
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	case CBSS_PAGE_OVERFLOW:
		STAT(mesq_page_overflow);
		fallthrough;
	default:
		BUG();
	}
	return ret;
}

/*
 * Send a message to a message queue
 *	mqd	message queue descriptor
 *	mesg	message. Must be a vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
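/*
 * Illustrative usage sketch (not part of the driver): a kernel client that
 * has already set up a queue with gru_create_message_queue() might send a
 * one-cacheline message as below. "my_mqd" and "struct my_msg" are
 * hypothetical, and the leading bytes of the message are overwritten by
 * the message_header (present/lines) fields when the message is queued.
 *
 *	struct my_msg {
 *		int hdr;			(clobbered by the header)
 *		char payload[60];
 *	} m = { .payload = "hello" };
 *	int ret;
 *
 *	do {
 *		ret = gru_send_message_gpa(&my_mqd, &m, sizeof(m));
 *	} while (ret == MQE_CONGESTION);
 *	if (ret != MQE_OK)
 *		pr_debug("GRU send failed: %d\n", ret);
 */
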
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. Caller must call gru_free_message() to advance to the
 * next message.
 *	mqd	message queue descriptor
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
	    get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	STAT(mesq_receive);
	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);

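/*
 * Illustrative usage sketch (not part of the driver): draining a queue
 * with the two calls above. "my_mqd" and consume() are hypothetical; each
 * message must be freed to advance the receive pointer before the next
 * one becomes visible.
 *
 *	void *m;
 *
 *	while ((m = gru_get_next_message(&my_mqd)) != NULL) {
 *		consume(m);
 *		gru_free_message(&my_mqd, m);
 *	}
 */
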
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
 */
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{
	void *cb;
	void *dsr;
	int ret, iaa;

	STAT(read_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
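	/* the top two bits of the GPA select the iaa (address space) passed to gru_vload_phys() */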
	iaa = gpa >> 62;
	gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
	ret = gru_wait(cb);
	if (ret == CBS_IDLE)
		*value = *(unsigned long *)dsr;
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_read_gpa);


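/*
 * Illustrative usage sketch (not part of the driver): reading one DW from
 * a global address. "my_gpa" is hypothetical; CBS_IDLE from the internal
 * gru_wait() signals success.
 *
 *	unsigned long val;
 *
 *	if (gru_read_gpa(&val, my_gpa) == CBS_IDLE)
 *		pr_debug("gpa 0x%lx -> 0x%lx\n", my_gpa, val);
 */
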
/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
		 unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);

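/*
 * Illustrative usage sketch (not part of the driver): copying a small
 * kernel buffer between two global addresses; quicktest3() below
 * exercises exactly this path. "src"/"dst" are hypothetical:
 *
 *	char src[64], dst[64];
 *
 *	memset(src, 0xab, sizeof(src));
 *	if (gru_copy_gpa(uv_gpa(dst), uv_gpa(src), sizeof(src)) != CBS_IDLE)
 *		pr_debug("GRU copy failed\n");
 */
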
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
		goto done;
	}

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU:%d quicktest0: bad magic 0x%lx\n", smp_processor_id(), *p);
		goto done;
	}
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
		goto done;
	}

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       smp_processor_id(), word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}

#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~((q) - 1)))
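/*
 * E.g. with q == 1024, ALIGNUP rounds up to the next 1KB boundary:
 * ALIGNUP(0x12340301, 1024) == (void *)0x12340400.
 */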

static int quicktest1(unsigned long arg)
{
	struct gru_message_queue_desc mqd;
	void *p, *mq;
	int i, ret = -EIO;
	char mes[GRU_CACHE_LINE_BYTES], *m;

	/* Need a 1KB-aligned queue that does not cross a page boundary */
	p = kmalloc(4096, GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;
	mq = ALIGNUP(p, 1024);
	memset(mes, 0xee, sizeof(mes));

	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
	for (i = 0; i < 6; i++) {
		mes[8] = i;
		do {
			ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
		} while (ret == MQE_CONGESTION);
		if (ret)
			break;
	}
	if (ret != MQE_QUEUE_FULL || i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: unexpected status %d, i %d\n",
		       smp_processor_id(), ret, i);
		goto done;
	}

	for (i = 0; i < 6; i++) {
		m = gru_get_next_message(&mqd);
		if (!m || m[8] != i)
			break;
		gru_free_message(&mqd, m);
	}
	if (i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: bad message, i %d, m %p, m8 %d\n",
		       smp_processor_id(), i, m, m ? m[8] : -1);
		goto done;
	}
	ret = 0;

done:
	kfree(p);
	return ret;
}

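/*
 * quicktest2 exercises the async kernel API end to end: reserve CBRs on a
 * blade (gru_reserve_async_resources), lock them for use
 * (gru_lock_async_resource), issue instructions with IMA_INTERRUPT so that
 * completion is signalled by interrupt, block in gru_wait_async_cbr()
 * until a CBR completes, then unlock and release the resources.
 */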
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	struct gru_control_block_status *gen;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
			 XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	k = numcb;
	do {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
				break;
		}
		if (i == numcb)
			continue;
		if (istatus != CBS_IDLE) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
			ret = -EFAULT;
		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
			   buf[4 * i + 3]) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
			       smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
			ret = -EIO;
		}
		k--;
		gen = cb;
		gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
	} while (k);
	BUG_ON(cmp.done);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}

#define BUFSIZE 200
static int quicktest3(unsigned long arg)
{
	char buf1[BUFSIZE], buf2[BUFSIZE];
	int ret = 0;

	memset(buf2, 0, sizeof(buf2));
	memset(buf1, get_cycles() & 255, sizeof(buf1));
	gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
	if (memcmp(buf1, buf2, BUFSIZE)) {
		printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
		ret = -EIO;
	}
	return ret;
}

/*
 * Debugging only. User-triggered hook for running the various kernel
 * self-tests of the driver & GRU.
 */
int gru_ktest(unsigned long arg)
{
	int ret = -EINVAL;

	switch (arg & 0xff) {
	case 0:
		ret = quicktest0(arg);
		break;
	case 1:
		ret = quicktest1(arg);
		break;
	case 2:
		ret = quicktest2(arg);
		break;
	case 3:
		ret = quicktest3(arg);
		break;
	case 99:
		ret = gru_free_kernel_contexts();
		break;
	}
	return ret;
}
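
/*
 * E.g. gru_ktest(2) runs quicktest2 and gru_ktest(99) tears down the
 * kernel contexts. In the full driver this hook is typically reached
 * from userspace through the driver's test ioctl.
 */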

int gru_kservices_init(void)
{
	return 0;
}

void gru_kservices_exit(void)
{
	if (gru_free_kernel_contexts())
		BUG();
}