// SPDX-License-Identifier: GPL-2.0-only
/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 *
 * Many thanks to Dominik Brodowski for fixing up the cpufreq
 * infrastructure in order to make this driver easier to implement.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/asi.h>
#include <asm/timer.h>

static struct cpufreq_driver *cpufreq_us2e_driver;

struct us2e_freq_percpu_info {
	struct cpufreq_frequency_table table[6];
};

/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;

#define HBIRD_MEM_CNTL0_ADDR	0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR	0x1fe0000f080UL

/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8.  These are controlled
 * in the ESTAR mode control register.
 */
#define ESTAR_MODE_DIV_1	0x0000000000000000UL
#define ESTAR_MODE_DIV_2	0x0000000000000001UL
#define ESTAR_MODE_DIV_4	0x0000000000000003UL
#define ESTAR_MODE_DIV_6	0x0000000000000002UL
#define ESTAR_MODE_DIV_8	0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK	0x0000000000000007UL

#define MCTRL0_SREFRESH_ENAB	0x0000000000010000UL
#define MCTRL0_REFR_COUNT_MASK	0x0000000000007f00UL
#define MCTRL0_REFR_COUNT_SHIFT	8
#define MCTRL0_REFR_INTERVAL	7800
#define MCTRL0_REFR_CLKS_P_CNT	64

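/* The Hummingbird (HBIRD) control registers live at fixed physical
 * addresses and are accessed with ldxa/stxa through the
 * ASI_PHYS_BYPASS_EC_E address space.
 */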
static unsigned long read_hbreg(unsigned long addr)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
	return ret;
}

static void write_hbreg(unsigned long addr, unsigned long val)
{
	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
	if (addr == HBIRD_ESTAR_MODE_ADDR) {
		/* Need to wait 16 clock cycles for the PLL to lock. */
		udelay(1);
	}
}

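/* Enable or disable memory self-refresh in the MEM_CNTL0 register.
 * The read-back forces the write to complete before we return.
 */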
static void self_refresh_ctl(int enable)
{
	unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (enable)
		mctrl |= MCTRL0_SREFRESH_ENAB;
	else
		mctrl &= ~MCTRL0_SREFRESH_ENAB;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
}

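/* Rescale the memory refresh counter for the new clock divisor.  When
 * the cpu is slowing down and self-refresh is not enabled, wait long
 * enough for refreshes programmed at both the old and the new rate to
 * drain before the slower clock takes effect.
 */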
static void frob_mem_refresh(int cpu_slowing_down,
			     unsigned long clock_tick,
			     unsigned long old_divisor, unsigned long divisor)
{
	unsigned long old_refr_count, refr_count, mctrl;

	refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);

	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
		>> MCTRL0_REFR_COUNT_SHIFT;

	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);

	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
		unsigned long usecs;

		/* We have to wait for both refresh counts (old
		 * and new) to go to zero.
		 */
		usecs = (MCTRL0_REFR_CLKS_P_CNT *
			 (refr_count + old_refr_count) *
			 1000000UL *
			 old_divisor) / clock_tick;
		udelay(usecs + 1UL);
	}
}

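/* Switch the ESTAR mode divisor, toggling memory self-refresh and
 * reprogramming the refresh counter around the change.  Transitions
 * that do not go directly between divisors 1 and 2 are broken up into
 * steps through divisor 2, per the state transition diagram in the
 * IIe manual.
 */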
static void us2e_transition(unsigned long estar, unsigned long new_bits,
			    unsigned long clock_tick,
			    unsigned long old_divisor, unsigned long divisor)
{
	estar &= ~ESTAR_MODE_DIV_MASK;

	/* This is based upon the state transition diagram in the IIe manual. */
	if (old_divisor == 2 && divisor == 1) {
		self_refresh_ctl(0);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
	} else if (old_divisor == 1 && divisor == 2) {
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		self_refresh_ctl(1);
	} else if (old_divisor == 1 && divisor > 2) {
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				1, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor > 2 && divisor == 1) {
		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
				old_divisor, 2);
		us2e_transition(estar, new_bits, clock_tick,
				2, divisor);
	} else if (old_divisor < divisor) {
		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
	} else if (old_divisor > divisor) {
		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
	} else {
		BUG();
	}
}

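/* Map a frequency table index to the ESTAR mode divisor field value. */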
static unsigned long index_to_estar_mode(unsigned int index)
{
	switch (index) {
	case 0:
		return ESTAR_MODE_DIV_1;

	case 1:
		return ESTAR_MODE_DIV_2;

	case 2:
		return ESTAR_MODE_DIV_4;

	case 3:
		return ESTAR_MODE_DIV_6;

	case 4:
		return ESTAR_MODE_DIV_8;

	default:
		BUG();
	}
}

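/* Map a frequency table index to its numeric clock divisor. */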
static unsigned long index_to_divisor(unsigned int index)
{
	switch (index) {
	case 0:
		return 1;

	case 1:
		return 2;

	case 2:
		return 4;

	case 3:
		return 6;

	case 4:
		return 8;

	default:
		BUG();
	}
}

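/* Decode the divisor currently programmed in the ESTAR mode register. */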
static unsigned long estar_to_divisor(unsigned long estar)
{
	unsigned long ret;

	switch (estar & ESTAR_MODE_DIV_MASK) {
	case ESTAR_MODE_DIV_1:
		ret = 1;
		break;
	case ESTAR_MODE_DIV_2:
		ret = 2;
		break;
	case ESTAR_MODE_DIV_4:
		ret = 4;
		break;
	case ESTAR_MODE_DIV_6:
		ret = 6;
		break;
	case ESTAR_MODE_DIV_8:
		ret = 8;
		break;
	default:
		BUG();
	}

	return ret;
}

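/* Read the ESTAR mode register; run on the target cpu via
 * smp_call_function_single() so the register belongs to that cpu.
 */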
static void __us2e_freq_get(void *arg)
{
	unsigned long *estar = arg;

	*estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
}

static unsigned int us2e_freq_get(unsigned int cpu)
{
	unsigned long clock_tick, estar;

	clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
		return 0;

	return clock_tick / estar_to_divisor(estar);
}

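/* Runs on the target cpu: compute the new divisor from the frequency
 * table index and perform the transition if it differs from the
 * currently programmed one.
 */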
static void __us2e_freq_target(void *arg)
{
	unsigned int cpu = smp_processor_id();
	unsigned int *index = arg;
	unsigned long new_bits, new_freq;
	unsigned long clock_tick, divisor, old_divisor, estar;

	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	new_bits = index_to_estar_mode(*index);
	divisor = index_to_divisor(*index);
	new_freq /= divisor;

	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);

	old_divisor = estar_to_divisor(estar);

	if (old_divisor != divisor) {
		us2e_transition(estar, new_bits, clock_tick * 1000,
				old_divisor, divisor);
	}
}

static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int cpu = policy->cpu;

	return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
}

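/* Build the per-cpu frequency table from the cpu's full clock rate
 * (in kHz) divided by each supported divisor.
 */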
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
	struct cpufreq_frequency_table *table =
		&us2e_freq_table[cpu].table[0];

	table[0].driver_data = 0;
	table[0].frequency = clock_tick / 1;
	table[1].driver_data = 1;
	table[1].frequency = clock_tick / 2;
	table[2].driver_data = 2;
	table[2].frequency = clock_tick / 4;
	table[3].driver_data = 3;
	table[3].frequency = clock_tick / 6;
	table[4].driver_data = 4;
	table[4].frequency = clock_tick / 8;
	table[5].driver_data = 5;
	table[5].frequency = CPUFREQ_TABLE_END;

	policy->cpuinfo.transition_latency = 0;
	policy->cur = clock_tick;
	policy->freq_table = table;

	return 0;
}

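/* On exit, return the cpu to full speed (table index 0, divisor 1). */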
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
	if (cpufreq_us2e_driver)
		us2e_freq_target(policy, 0);

	return 0;
}

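/* Probe for an UltraSPARC-IIe by checking the manufacturer and
 * implementation fields of the %ver register, then allocate and
 * register the cpufreq driver.
 */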
static int __init us2e_freq_init(void)
{
	unsigned long manuf, impl, ver;
	int ret;

	if (tlb_type != spitfire)
		return -ENODEV;

	__asm__("rdpr %%ver, %0" : "=r" (ver));
	manuf = ((ver >> 48) & 0xffff);
	impl = ((ver >> 32) & 0xffff);

	if (manuf == 0x17 && impl == 0x13) {
		struct cpufreq_driver *driver;

		ret = -ENOMEM;
		driver = kzalloc(sizeof(*driver), GFP_KERNEL);
		if (!driver)
			goto err_out;

		us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
					  GFP_KERNEL);
		if (!us2e_freq_table)
			goto err_out;

		driver->init = us2e_freq_cpu_init;
		driver->verify = cpufreq_generic_frequency_table_verify;
		driver->target_index = us2e_freq_target;
		driver->get = us2e_freq_get;
		driver->exit = us2e_freq_cpu_exit;
		strcpy(driver->name, "UltraSPARC-IIe");

		cpufreq_us2e_driver = driver;
		ret = cpufreq_register_driver(driver);
		if (ret)
			goto err_out;

		return 0;

err_out:
		if (driver) {
			kfree(driver);
			cpufreq_us2e_driver = NULL;
		}
		kfree(us2e_freq_table);
		us2e_freq_table = NULL;
		return ret;
	}

	return -ENODEV;
}

static void __exit us2e_freq_exit(void)
{
	if (cpufreq_us2e_driver) {
		cpufreq_unregister_driver(cpufreq_us2e_driver);
		kfree(cpufreq_us2e_driver);
		cpufreq_us2e_driver = NULL;
		kfree(us2e_freq_table);
		us2e_freq_table = NULL;
	}
}

MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
MODULE_LICENSE("GPL");

module_init(us2e_freq_init);
module_exit(us2e_freq_exit);