// SPDX-License-Identifier: GPL-2.0
/* calibrate.c: default delay calibration
 *
 * Excised from init/main.c
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/percpu.h>

unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
	preset_lpj = simple_strtoul(str, NULL, 0);
	return 1;
}

__setup("lpj=", lpj_setup);
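
/*
 * Usage note (illustrative value): passing "lpj=4996940" on the kernel
 * command line stores that value in preset_lpj, and calibrate_delay()
 * below then skips the measurement and uses it directly.
 */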

#ifdef ARCH_HAS_READ_CURRENT_TIMER

/* This routine uses read_current_timer() to get loops per jiffy directly,
 * instead of guessing it using delay().  It also tries to handle
 * non-maskable asynchronous events (like SMIs).
 */
#define DELAY_CALIBRATION_TICKS		((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES	5
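
/*
 * Illustrative arithmetic (HZ == 250 assumed): DELAY_CALIBRATION_TICKS is
 * then HZ/100 == 2, so each calibration attempt below measures the timer
 * across two jiffies, i.e. about 8 ms of wall-clock time.
 */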

static unsigned long calibrate_delay_direct(void)
{
	unsigned long pre_start, start, post_start;
	unsigned long pre_end, end, post_end;
	unsigned long start_jiffies;
	unsigned long timer_rate_min, timer_rate_max;
	unsigned long good_timer_sum = 0;
	unsigned long good_timer_count = 0;
	unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
	int max = -1; /* index of measured_times with max/min values or not set */
	int min = -1;
	int i;

	if (read_current_timer(&pre_start) < 0)
		return 0;

	/*
	 * A simple loop like
	 *	while (jiffies < start_jiffies + 1)
	 *		start = read_current_timer();
	 * will not do, as we don't really know whether the jiffy switch
	 * happened first or the timer value was read first, and some
	 * asynchronous event can occur between the two, introducing errors
	 * into lpj.
	 *
	 * So, we do
	 * 1. pre_start <- when we are sure that the jiffy switch hasn't happened
	 * 2. check jiffy switch
	 * 3. start <- timer value before or after the jiffy switch
	 * 4. post_start <- when we are sure that the jiffy switch has happened
	 *
	 * Note, we don't know anything about the order of 2 and 3.
	 * Now, by looking at the difference between post_start and pre_start,
	 * we can check whether any asynchronous event happened or not.
	 */

	for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
		pre_start = 0;
		read_current_timer(&start);
		start_jiffies = jiffies;
		while (time_before_eq(jiffies, start_jiffies + 1)) {
			pre_start = start;
			read_current_timer(&start);
		}
		read_current_timer(&post_start);

		pre_end = 0;
		end = post_start;
		while (time_before_eq(jiffies, start_jiffies + 1 +
					       DELAY_CALIBRATION_TICKS)) {
			pre_end = end;
			read_current_timer(&end);
		}
		read_current_timer(&post_end);

		timer_rate_max = (post_end - pre_start) /
					DELAY_CALIBRATION_TICKS;
		timer_rate_min = (pre_end - post_start) /
					DELAY_CALIBRATION_TICKS;
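		/*
		 * post_end - pre_start brackets the measured tick interval
		 * from the outside (an overestimate of the rate), while
		 * pre_end - post_start brackets it from the inside (an
		 * underestimate), so the true timer rate lies in between.
		 */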

		/*
		 * If the upper limit and lower limit of the timer_rate are
		 * >= 12.5% apart, redo calibration.
		 */
		if (start >= post_end)
			printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
					"timer_rate as we had a TSC wrap around"
					" start=%lu >= post_end=%lu\n",
					start, post_end);
		if (start < post_end && pre_start != 0 && pre_end != 0 &&
		    (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
			good_timer_count++;
			good_timer_sum += timer_rate_max;
			measured_times[i] = timer_rate_max;
			if (max < 0 || timer_rate_max > measured_times[max])
				max = i;
			if (min < 0 || timer_rate_max < measured_times[min])
				min = i;
		} else {
			measured_times[i] = 0;
		}
	}

	/*
	 * Find the maximum & minimum - if they differ too much, throw out the
	 * one with the largest difference from the mean and try again...
	 */
	while (good_timer_count > 1) {
		unsigned long estimate;
		unsigned long maxdiff;

		/* compute the estimate */
		estimate = (good_timer_sum/good_timer_count);
		maxdiff = estimate >> 3;

		/* if the range is within 12.5%, let's take it */
		if ((measured_times[max] - measured_times[min]) < maxdiff)
			return estimate;

		/* ok - drop the worse value and try again... */
		good_timer_sum = 0;
		good_timer_count = 0;
		if ((measured_times[max] - estimate) <
				(estimate - measured_times[min])) {
			printk(KERN_NOTICE "calibrate_delay_direct() dropping "
					"min BogoMIPS estimate %d = %lu\n",
					min, measured_times[min]);
			measured_times[min] = 0;
			min = max;
		} else {
			printk(KERN_NOTICE "calibrate_delay_direct() dropping "
					"max BogoMIPS estimate %d = %lu\n",
					max, measured_times[max]);
			measured_times[max] = 0;
			max = min;
		}

		for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
			if (measured_times[i] == 0)
				continue;
			good_timer_count++;
			good_timer_sum += measured_times[i];
			if (measured_times[i] < measured_times[min])
				min = i;
			if (measured_times[i] > measured_times[max])
				max = i;
		}
	}

	printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
	       "estimate for loops_per_jiffy.\nProbably due to long platform "
	       "interrupts. Consider using \"lpj=\" boot option.\n");
	return 0;
}
#else
static unsigned long calibrate_delay_direct(void)
{
	return 0;
}
#endif

/*
 * This is the number of bits of precision for loops_per_jiffy.  Each
 * refinement of the estimate after the first takes 1.5/HZ seconds, so try
 * to start with a good estimate.
 * For the boot CPU we can skip the delay calibration and assign it a value
 * calculated based on the timer frequency.
 * For the rest of the CPUs we cannot assume that the timer frequency is the
 * same as the CPU frequency, hence we do the calibration for those.
 */
#define LPS_PREC 8
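
/*
 * Illustrative arithmetic (assumed lpj value): with lpj around 4,000,000 and
 * LPS_PREC == 8, the binary chop in calibrate_delay_converge() stops once
 * loopadd falls below lpj >> 8, i.e. roughly 15,600 loops, or about 0.4% of
 * the final estimate.
 */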

static unsigned long calibrate_delay_converge(void)
{
	/* First stage - slowly accelerate to find initial bounds */
	unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
	int trials = 0, band = 0, trial_in_band = 0;

	lpj = (1<<12);

	/* wait for "start of" clock tick */
	ticks = jiffies;
	while (ticks == jiffies)
		; /* nothing */
	/* Go .. */
	ticks = jiffies;
	do {
		if (++trial_in_band == (1<<band)) {
			++band;
			trial_in_band = 0;
		}
		__delay(lpj * band);
		trials += band;
	} while (ticks == jiffies);
	/*
	 * We overshot, so retreat to a clear underestimate. Then estimate
	 * the largest likely undershoot. This defines our chop bounds.
	 */
	trials -= band;
	loopadd_base = lpj * band;
	lpj_base = lpj * trials;
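	/*
	 * Illustrative walk-through (not tied to any particular HZ): the loop
	 * above delays for lpj*1, lpj*1, lpj*2, lpj*2, lpj*2, lpj*2, lpj*3, ...
	 * so each band runs twice as many trials as the previous one while
	 * "trials" accumulates the total delay in units of lpj.  Dropping the
	 * final band leaves lpj_base as a clear underestimate, with
	 * loopadd_base bounding the largest likely undershoot.
	 */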

recalibrate:
	lpj = lpj_base;
	loopadd = loopadd_base;

	/*
	 * Do a binary approximation to get lpj set to
	 * equal one clock tick (up to LPS_PREC bits)
	 */
	chop_limit = lpj >> LPS_PREC;
	while (loopadd > chop_limit) {
		lpj += loopadd;
		ticks = jiffies;
		while (ticks == jiffies)
			; /* nothing */
		ticks = jiffies;
		__delay(lpj);
		if (jiffies != ticks)	/* longer than 1 tick */
			lpj -= loopadd;
		loopadd >>= 1;
	}
	/*
	 * If we incremented every single time possible, presume we've
	 * massively underestimated initially, and retry with a higher
	 * start and a larger range. (Only seen on x86_64, due to SMIs)
	 */
	if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
		lpj_base = lpj;
		loopadd_base <<= 2;
		goto recalibrate;
	}

	return lpj;
}

static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };

/*
 * Check if the CPU's delay calibration is already known.  For example,
 * some processors with multi-core sockets may have all cores
 * with the same calibration delay.
 *
 * Architectures should override this function if a faster calibration
 * method is available.
 */
unsigned long __attribute__((weak)) calibrate_delay_is_known(void)
{
	return 0;
}
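
/*
 * Illustrative sketch only (not built; hypothetical helper): an architecture
 * with a reliable, constant-rate clocksource could override the weak symbol
 * above roughly along these lines, returning a precomputed loops-per-jiffy
 * value so that the generic calibration below is skipped entirely.
 */
#if 0
unsigned long calibrate_delay_is_known(void)
{
	/*
	 * arch_timer_lpj() is a hypothetical arch-specific helper that
	 * derives lpj from a known timer frequency; returning 0 falls back
	 * to the generic calibration.
	 */
	return arch_timer_lpj();
}
#endif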

/*
 * Indicate that the CPU delay calibration is done.  This can be used by
 * architectures to stop accepting delay timer registrations after this point.
 */
void __attribute__((weak)) calibration_delay_done(void)
{
}

void calibrate_delay(void)
{
	unsigned long lpj;
	static bool printed;
	int this_cpu = smp_processor_id();

	if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
		lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
		if (!printed)
			pr_info("Calibrating delay loop (skipped) "
				"already calibrated this CPU");
	} else if (preset_lpj) {
		lpj = preset_lpj;
		if (!printed)
			pr_info("Calibrating delay loop (skipped) "
				"preset value.. ");
	} else if ((!printed) && lpj_fine) {
		lpj = lpj_fine;
		pr_info("Calibrating delay loop (skipped), "
			"value calculated using timer frequency.. ");
	} else if ((lpj = calibrate_delay_is_known())) {
		;
	} else if ((lpj = calibrate_delay_direct()) != 0) {
		if (!printed)
			pr_info("Calibrating delay using timer "
				"specific routine.. ");
	} else {
		if (!printed)
			pr_info("Calibrating delay loop... ");
		lpj = calibrate_delay_converge();
	}
	per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
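	/*
	 * Illustrative arithmetic (assumed values): with HZ == 250 and
	 * lpj == 4996940, the message below reads
	 * "2498.47 BogoMIPS (lpj=4996940)", since
	 * 4996940 / (500000 / 250) == 2498 and
	 * (4996940 / (5000 / 250)) % 100 == 47.
	 */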
	if (!printed)
		pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
			lpj/(500000/HZ),
			(lpj/(5000/HZ)) % 100, lpj);

	loops_per_jiffy = lpj;
	printed = true;

	calibration_delay_done();
}