// SPDX-License-Identifier: GPL-2.0-only
/*
 * RackMac vu-meter driver
 *
 * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 *
 * Support the CPU-meter LEDs of the Xserve G5
 *
 * TODO: Implement PWM to do variable intensity and provide a userland
 * interface for fun. The CPU meter could also be made nicer by being
 * a bit less "immediate" and instead reflecting a more averaged load
 * over time. Patches welcome :-)
 */
#undef DEBUG

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
#include <asm/macio.h>
#include <asm/keylargo.h>

/* Number of samples in a sample buffer */
#define SAMPLE_COUNT		256

/* CPU meter sampling rate in ms */
#define CPU_SAMPLING_RATE	250

struct rackmeter_dma {
	struct dbdma_cmd	cmd[4]			____cacheline_aligned;
	u32			mark			____cacheline_aligned;
	u32			buf1[SAMPLE_COUNT]	____cacheline_aligned;
	u32			buf2[SAMPLE_COUNT]	____cacheline_aligned;
} ____cacheline_aligned;

struct rackmeter_cpu {
	struct delayed_work	sniffer;
	struct rackmeter	*rm;
	u64			prev_wall;
	u64			prev_idle;
	int			zero;
} ____cacheline_aligned;

struct rackmeter {
	struct macio_dev		*mdev;
	unsigned int			irq;
	struct device_node		*i2s;
	u8				*ubuf;
	struct dbdma_regs __iomem	*dma_regs;
	void __iomem			*i2s_regs;
	dma_addr_t			dma_buf_p;
	struct rackmeter_dma		*dma_buf_v;
	int				stale_irq;
	struct rackmeter_cpu		cpu[2];
	int				paused;
	struct mutex			sem;
};

/* To be set as a tunable */
static int rackmeter_ignore_nice;

/* This GPIO is whacked by the OS X driver when initializing */
#define RACKMETER_MAGIC_GPIO	0x78

/* This is copied from cpufreq_ondemand, maybe we should put it in
 * a common header somewhere
 */
static inline u64 get_cpu_idle_time(unsigned int cpu)
{
	struct kernel_cpustat *kcpustat = &kcpustat_cpu(cpu);
	u64 retval;

	retval = kcpustat->cpustat[CPUTIME_IDLE] +
		 kcpustat->cpustat[CPUTIME_IOWAIT];

	if (rackmeter_ignore_nice)
		retval += kcpustat_field(kcpustat, CPUTIME_NICE, cpu);

	return retval;
}

static void rackmeter_setup_i2s(struct rackmeter *rm)
{
	struct macio_chip *macio = rm->mdev->bus->chip;

	/* First whack magic GPIO */
	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, RACKMETER_MAGIC_GPIO, 5);


	/* Call feature code to enable the sound channel and the proper
	 * clock sources
	 */
	pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, rm->i2s, 0, 1);

	/* Power i2s and stop i2s clock. We whack MacIO FCRs directly for now.
	 * This is a bit racy, thus we should add new platform functions to
	 * handle that. snd-aoa needs that too
	 */
	MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE);
	MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT);
	(void)MACIO_IN32(KEYLARGO_FCR1);
	udelay(10);

	/* Then setup i2s. For now, we use the same magic value that
	 * the OS X driver seems to use. We might want to play around
	 * with the clock divisors later
	 */
	out_le32(rm->i2s_regs + 0x10, 0x01fa0000);
	(void)in_le32(rm->i2s_regs + 0x10);
	udelay(10);

	/* Fully restart i2s */
	MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE |
		  KL1_I2S0_CLK_ENABLE_BIT);
	(void)MACIO_IN32(KEYLARGO_FCR1);
	udelay(10);
}

static void rackmeter_set_default_pattern(struct rackmeter *rm)
{
	int i;

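	/* Default pattern: light every other LED, with the two banks of
	 * eight LEDs out of phase with each other.
	 */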
	for (i = 0; i < 16; i++) {
		if (i < 8)
			rm->ubuf[i] = (i & 1) * 255;
		else
			rm->ubuf[i] = ((~i) & 1) * 255;
	}
}

static void rackmeter_do_pause(struct rackmeter *rm, int pause)
{
	struct rackmeter_dma *rdma = rm->dma_buf_v;

	pr_debug("rackmeter: %s\n", pause ? "paused" : "started");

	rm->paused = pause;
	if (pause) {
		DBDMA_DO_STOP(rm->dma_regs);
		return;
	}
	memset(rdma->buf1, 0, sizeof(rdma->buf1));
	memset(rdma->buf2, 0, sizeof(rdma->buf2));

	rm->dma_buf_v->mark = 0;

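	/* Make sure the buffer and mark writes above are visible before the
	 * DBDMA engine is restarted below.
	 */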
	mb();
	out_le32(&rm->dma_regs->cmdptr_hi, 0);
	out_le32(&rm->dma_regs->cmdptr, rm->dma_buf_p);
	out_le32(&rm->dma_regs->control, (RUN << 16) | RUN);
}

static void rackmeter_setup_dbdma(struct rackmeter *rm)
{
	struct rackmeter_dma *db = rm->dma_buf_v;
	struct dbdma_cmd *cmd = db->cmd;

	/* Make sure dbdma is reset */
	DBDMA_DO_RESET(rm->dma_regs);

	pr_debug("rackmeter: mark offset=0x%zx\n",
		 offsetof(struct rackmeter_dma, mark));
	pr_debug("rackmeter: buf1 offset=0x%zx\n",
		 offsetof(struct rackmeter_dma, buf1));
	pr_debug("rackmeter: buf2 offset=0x%zx\n",
		 offsetof(struct rackmeter_dma, buf2));

	/* Prepare 4 dbdma commands for the 2 buffers */
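	/* The four commands form an endless ring: a STORE_WORD updates the
	 * mark word (read back as 2 or 1 by the interrupt handler, which
	 * tells it which buffer to refill) and raises an interrupt, then an
	 * OUTPUT_MORE streams one sample buffer to the i2s port; the last
	 * command branches back to the first.
	 */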
	memset(cmd, 0, 4 * sizeof(struct dbdma_cmd));
	cmd->req_count = cpu_to_le16(4);
	cmd->command = cpu_to_le16(STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
	cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
				    offsetof(struct rackmeter_dma, mark));
	cmd->cmd_dep = cpu_to_le32(0x02000000);
	cmd++;

	cmd->req_count = cpu_to_le16(SAMPLE_COUNT * 4);
	cmd->command = cpu_to_le16(OUTPUT_MORE);
	cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
				    offsetof(struct rackmeter_dma, buf1));
	cmd++;

	cmd->req_count = cpu_to_le16(4);
	cmd->command = cpu_to_le16(STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
	cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
				    offsetof(struct rackmeter_dma, mark));
	cmd->cmd_dep = cpu_to_le32(0x01000000);
	cmd++;

	cmd->req_count = cpu_to_le16(SAMPLE_COUNT * 4);
	cmd->command = cpu_to_le16(OUTPUT_MORE | BR_ALWAYS);
	cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
				    offsetof(struct rackmeter_dma, buf2));
	cmd->cmd_dep = cpu_to_le32(rm->dma_buf_p);

	rackmeter_do_pause(rm, 0);
}

static void rackmeter_do_timer(struct work_struct *work)
{
	struct rackmeter_cpu *rcpu =
		container_of(work, struct rackmeter_cpu, sniffer.work);
	struct rackmeter *rm = rcpu->rm;
	unsigned int cpu = smp_processor_id();
	u64 cur_nsecs, total_idle_nsecs;
	u64 total_nsecs, idle_nsecs;
	int i, offset, load, cumm, pause;

	cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
	total_nsecs = cur_nsecs - rcpu->prev_wall;
	rcpu->prev_wall = cur_nsecs;

	total_idle_nsecs = get_cpu_idle_time(cpu);
	idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
	idle_nsecs = min(idle_nsecs, total_nsecs);
	rcpu->prev_idle = total_idle_nsecs;

	/* We do a very dumb calculation to update the LEDs for now; we'll
	 * do better once we have actual PWM implemented
	 */
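	/* load ends up in the 0..9 range; LED i of this CPU's bank of eight
	 * lights up when load > i, so the whole bank is lit at full load.
	 */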
	load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);

	offset = cpu << 3;
	cumm = 0;
	for (i = 0; i < 8; i++) {
		u8 ub = (load > i) ? 0xff : 0;
		rm->ubuf[i + offset] = ub;
		cumm |= ub;
	}
	rcpu->zero = (cumm == 0);

	/* Now check if the LEDs are all 0; if so, we can stop the DMA */
	pause = (rm->cpu[0].zero && rm->cpu[1].zero);
	if (pause != rm->paused) {
		mutex_lock(&rm->sem);
		pause = (rm->cpu[0].zero && rm->cpu[1].zero);
		rackmeter_do_pause(rm, pause);
		mutex_unlock(&rm->sem);
	}
	schedule_delayed_work_on(cpu, &rcpu->sniffer,
				 msecs_to_jiffies(CPU_SAMPLING_RATE));
}

static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
{
	unsigned int cpu;

	/* This driver works only with 1 or 2 CPUs numbered 0 and 1,
	 * but that's really all we have on Apple Xserve. It doesn't
	 * play very nicely with CPU hotplug either, but we don't do
	 * that on those machines yet
	 */

	rm->cpu[0].rm = rm;
	INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
	rm->cpu[1].rm = rm;
	INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);

	for_each_online_cpu(cpu) {
		struct rackmeter_cpu *rcpu;

		if (cpu > 1)
			continue;
		rcpu = &rm->cpu[cpu];
		rcpu->prev_idle = get_cpu_idle_time(cpu);
		rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
		schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
					 msecs_to_jiffies(CPU_SAMPLING_RATE));
	}
}

static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
{
	cancel_delayed_work_sync(&rm->cpu[0].sniffer);
	cancel_delayed_work_sync(&rm->cpu[1].sniffer);
}

static int rackmeter_setup(struct rackmeter *rm)
{
	pr_debug("rackmeter: setting up i2s..\n");
	rackmeter_setup_i2s(rm);

	pr_debug("rackmeter: setting up default pattern..\n");
	rackmeter_set_default_pattern(rm);

	pr_debug("rackmeter: setting up dbdma..\n");
	rackmeter_setup_dbdma(rm);

	pr_debug("rackmeter: start CPU measurements..\n");
	rackmeter_init_cpu_sniffer(rm);

	printk(KERN_INFO "RackMeter initialized\n");

	return 0;
}

/* XXX FIXME: No PWM yet, this is 0/1 */
static u32 rackmeter_calc_sample(struct rackmeter *rm, unsigned int index)
{
	int led;
	u32 sample = 0;

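	/* Pack one on/off bit per LED (on when its ubuf value is >= 0x80)
	 * into a 16-bit pattern, then rotate it within the 32-bit sample,
	 * presumably to line up with the serial frame the LEDs expect.
	 */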
	for (led = 0; led < 16; led++) {
		sample >>= 1;
		sample |= ((rm->ubuf[led] >= 0x80) << 15);
	}
	return (sample << 17) | (sample >> 15);
}

static irqreturn_t rackmeter_irq(int irq, void *arg)
{
	struct rackmeter *rm = arg;
	struct rackmeter_dma *db = rm->dma_buf_v;
	unsigned int mark, i;
	u32 *buf;

	/* Flush PCI buffers with an MMIO read. Maybe we could actually
	 * check the status one day ... in case things go wrong, though
	 * this never happened to me
	 */
	(void)in_le32(&rm->dma_regs->status);

	/* Make sure the CPU gets us in order */
	rmb();

	/* Read mark */
	mark = db->mark;
	if (mark != 1 && mark != 2) {
		printk(KERN_WARNING "rackmeter: Incorrect DMA mark 0x%08x\n",
		       mark);
		/* We allow for 3 errors like that (stale DBDMA irqs) */
		if (++rm->stale_irq > 3) {
			printk(KERN_ERR "rackmeter: Too many errors,"
			       " stopping DMA\n");
			DBDMA_DO_RESET(rm->dma_regs);
		}
		return IRQ_HANDLED;
	}

	/* The next buffer we need to fill is given by the mark value */
	buf = mark == 1 ? db->buf1 : db->buf2;

	/* Fill it now. This routine converts the 8-bit depth sample array
	 * into the PWM bitmap for each LED.
	 */
	for (i = 0; i < SAMPLE_COUNT; i++)
		buf[i] = rackmeter_calc_sample(rm, i);


	return IRQ_HANDLED;
}

static int rackmeter_probe(struct macio_dev* mdev,
			   const struct of_device_id *match)
{
	struct device_node *i2s = NULL, *np = NULL;
	struct rackmeter *rm = NULL;
	struct resource ri2s, rdma;
	int rc = -ENODEV;

	pr_debug("rackmeter_probe()\n");

	/* Get i2s-a node */
	for_each_child_of_node(mdev->ofdev.dev.of_node, i2s)
		if (of_node_name_eq(i2s, "i2s-a"))
			break;

	if (i2s == NULL) {
		pr_debug(" i2s-a child not found\n");
		goto bail;
	}
	/* Get lightshow or virtual sound */
	for_each_child_of_node(i2s, np) {
		if (of_node_name_eq(np, "lightshow"))
			break;
		if (of_node_name_eq(np, "sound") &&
		    of_get_property(np, "virtual", NULL) != NULL)
			break;
	}
	if (np == NULL) {
		pr_debug(" lightshow or sound+virtual child not found\n");
		goto bail;
	}

	/* Create and initialize our instance data */
	rm = kzalloc(sizeof(*rm), GFP_KERNEL);
	if (rm == NULL) {
		printk(KERN_ERR "rackmeter: failed to allocate memory !\n");
		rc = -ENOMEM;
		goto bail_release;
	}
	rm->mdev = mdev;
	rm->i2s = i2s;
	mutex_init(&rm->sem);
	dev_set_drvdata(&mdev->ofdev.dev, rm);
	/* Check resource availability. We need at least resources 0 and 1 */
#if 0 /* Use that when i2s-a is finally an mdev per se */
	if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
		printk(KERN_ERR
		       "rackmeter: found match but lacks resources: %pOF"
		       " (%d resources, %d interrupts)\n",
		       mdev->ofdev.dev.of_node,
		       macio_resource_count(mdev), macio_irq_count(mdev));
		rc = -ENXIO;
		goto bail_free;
	}
	if (macio_request_resources(mdev, "rackmeter")) {
		printk(KERN_ERR
		       "rackmeter: failed to request resources: %pOF\n",
		       mdev->ofdev.dev.of_node);
		rc = -EBUSY;
		goto bail_free;
	}
	rm->irq = macio_irq(mdev, 1);
#else
	rm->irq = irq_of_parse_and_map(i2s, 1);
	if (!rm->irq ||
	    of_address_to_resource(i2s, 0, &ri2s) ||
	    of_address_to_resource(i2s, 1, &rdma)) {
		printk(KERN_ERR
		       "rackmeter: found match but lacks resources: %pOF\n",
		       mdev->ofdev.dev.of_node);
		rc = -ENXIO;
		goto bail_free;
	}
#endif

	pr_debug(" i2s @0x%08x\n", (unsigned int)ri2s.start);
	pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start);
	pr_debug(" irq %d\n", rm->irq);

	rm->ubuf = (u8 *)__get_free_page(GFP_KERNEL);
	if (rm->ubuf == NULL) {
		printk(KERN_ERR
		       "rackmeter: failed to allocate samples page !\n");
		rc = -ENOMEM;
		goto bail_release;
	}

	rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
					   sizeof(struct rackmeter_dma),
					   &rm->dma_buf_p, GFP_KERNEL);
	if (rm->dma_buf_v == NULL) {
		printk(KERN_ERR
		       "rackmeter: failed to allocate dma buffer !\n");
		rc = -ENOMEM;
		goto bail_free_samples;
	}
#if 0
	rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000);
#else
	rm->i2s_regs = ioremap(ri2s.start, 0x1000);
#endif
	if (rm->i2s_regs == NULL) {
		printk(KERN_ERR
		       "rackmeter: failed to map i2s registers !\n");
		rc = -ENXIO;
		goto bail_free_dma;
	}
#if 0
	rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100);
#else
	rm->dma_regs = ioremap(rdma.start, 0x100);
#endif
	if (rm->dma_regs == NULL) {
		printk(KERN_ERR
		       "rackmeter: failed to map dma registers !\n");
		rc = -ENXIO;
		goto bail_unmap_i2s;
	}

	rc = rackmeter_setup(rm);
	if (rc) {
		printk(KERN_ERR
		       "rackmeter: failed to initialize !\n");
		rc = -ENXIO;
		goto bail_unmap_dma;
	}

	rc = request_irq(rm->irq, rackmeter_irq, 0, "rackmeter", rm);
	if (rc != 0) {
		printk(KERN_ERR
		       "rackmeter: failed to request interrupt !\n");
		goto bail_stop_dma;
	}
	of_node_put(np);
	return 0;

 bail_stop_dma:
	DBDMA_DO_RESET(rm->dma_regs);
 bail_unmap_dma:
	iounmap(rm->dma_regs);
 bail_unmap_i2s:
	iounmap(rm->i2s_regs);
 bail_free_dma:
	dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
			  sizeof(struct rackmeter_dma),
			  rm->dma_buf_v, rm->dma_buf_p);
 bail_free_samples:
	free_page((unsigned long)rm->ubuf);
 bail_release:
#if 0
	macio_release_resources(mdev);
#endif
 bail_free:
	kfree(rm);
 bail:
	of_node_put(i2s);
	of_node_put(np);
	dev_set_drvdata(&mdev->ofdev.dev, NULL);
	return rc;
}

static int rackmeter_remove(struct macio_dev* mdev)
{
	struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);

	/* Stop CPU sniffer timer & work queues */
	rackmeter_stop_cpu_sniffer(rm);

	/* Clear reference to private data */
	dev_set_drvdata(&mdev->ofdev.dev, NULL);

	/* Stop/reset dbdma */
	DBDMA_DO_RESET(rm->dma_regs);

	/* Release the IRQ */
	free_irq(rm->irq, rm);

	/* Unmap registers */
	iounmap(rm->dma_regs);
	iounmap(rm->i2s_regs);

	/* Free DMA */
	dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
			  sizeof(struct rackmeter_dma),
			  rm->dma_buf_v, rm->dma_buf_p);

	/* Free samples */
	free_page((unsigned long)rm->ubuf);

#if 0
	/* Release resources */
	macio_release_resources(mdev);
#endif

	/* Get rid of me */
	kfree(rm);

	return 0;
}

static int rackmeter_shutdown(struct macio_dev* mdev)
{
	struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);

	if (rm == NULL)
		return -ENODEV;

	/* Stop CPU sniffer timer & work queues */
	rackmeter_stop_cpu_sniffer(rm);

	/* Stop/reset dbdma */
	DBDMA_DO_RESET(rm->dma_regs);

	return 0;
}

static const struct of_device_id rackmeter_match[] = {
	{ .name = "i2s" },
	{ }
};
MODULE_DEVICE_TABLE(of, rackmeter_match);

static struct macio_driver rackmeter_driver = {
	.driver = {
		.name = "rackmeter",
		.owner = THIS_MODULE,
		.of_match_table = rackmeter_match,
	},
	.probe = rackmeter_probe,
	.remove = rackmeter_remove,
	.shutdown = rackmeter_shutdown,
};


static int __init rackmeter_init(void)
{
	pr_debug("rackmeter_init()\n");

	return macio_register_driver(&rackmeter_driver);
}

static void __exit rackmeter_exit(void)
{
	pr_debug("rackmeter_exit()\n");

	macio_unregister_driver(&rackmeter_driver);
}

module_init(rackmeter_init);
module_exit(rackmeter_exit);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("RackMeter: Support vu-meter on XServe front panel");