Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

coupled.c — git blame shows every line from commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <ccross@android.com>
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a
 * workaround).  Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call.  At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle.  A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time.  During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency).  The requested_state
 * variable is not locked.  It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
 *
 * Three counters are used.  online_count tracks the number of cpus
 * in the coupled set that are currently or soon will be online.
 * waiting_count tracks the number of cpus that are in the waiting
 * loop, in the ready loop, or in the coupled idle state.  ready_count
 * tracks the number of cpus that are in the ready loop or in the
 * coupled idle state.  (waiting_count and ready_count are packed into
 * the single atomic ready_waiting_counts below.)
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster.  The coupled_cpus mask must be
 *    set in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_driver.safe_state_index to the index of a
 *    state that is not a coupled state.  This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus.
 *
 *    Provide a struct cpuidle_state.enter function for each state
 *    that affects multiple cpus.  This function is guaranteed to be
 *    called on all cpus at approximately the same time.  The driver
 *    should ensure that the cpus all abort together if any cpu tries
 *    to abort once the function is called.  The function should return
 *    with interrupts still disabled.
 */
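
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a minimal hypothetical driver following the four rules above.  The
 * names my_idle_driver, my_enter_wfi and my_enter_coupled are invented
 * for illustration; the fields and flags are the real cpuidle ones.
 * State 0 is the safe, non-coupled WFI state; state 1 powers down
 * blocks shared between the cpus.
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name			= "my_coupled_idle",
 *		.owner			= THIS_MODULE,
 *		.states[0] = {
 *			.enter			= my_enter_wfi,
 *			.exit_latency		= 1,
 *			.target_residency	= 1,
 *			.name			= "WFI",
 *		},
 *		.states[1] = {
 *			.enter			= my_enter_coupled,
 *			.exit_latency		= 5000,
 *			.target_residency	= 10000,
 *			.flags			= CPUIDLE_FLAG_COUPLED,
 *			.name			= "OFF",
 *		},
 *		.state_count		= 2,
 *		.safe_state_index	= 0,
 *	};
 *
 * Each cpu's struct cpuidle_device must additionally have
 * dev->coupled_cpus set before registration; see the sketch after
 * cpuidle_coupled_register_device() below.
 */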

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: synchronisation point for abort cases
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)
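
/*
 * Worked example (editor's note): ready_waiting_counts packs two 16-bit
 * counts into one atomic_t, the waiting count in bits 0-15 and the ready
 * count in bits 16-31.  A cpu in the ready loop is still counted as
 * waiting.  With four online cpus all waiting and two of them ready:
 *
 *	counts  = (2 << WAITING_BITS) | 4;	value 0x00020004
 *	waiting = counts & WAITING_MASK;	4
 *	ready   = counts >> WAITING_BITS;	2
 *
 * Moving to ready is atomic_add(MAX_WAITING_CPUS, ...), which increments
 * the ready half without disturbing the waiting half.
 */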

static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * __smp_call_function_single with the per cpu call_single_data_t struct already
 * in use.  This prevents a deadlock where two cpus are waiting for each
 * other's call_single_data_t struct to become available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a:   atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from this
 * function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
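
/*
 * Worked example (editor's note): with n = 2 online cpus the barrier
 * counter steps through two phases.  cpu A increments a to 1 and spins
 * (1 < 2); cpu B increments a to 2 and both proceed.  In the second
 * phase A increments a to 3 and spins (3 > 2); B increments a to 4,
 * sees 4 == 2 * n, resets a to 0 and returns; A then reads 0 and
 * returns.  The second phase is what makes the barrier immediately
 * reusable: no cpu can re-enter it and see a stale nonzero count.
 *
 * A hypothetical coupled enter function might use it to agree on an
 * abort (my_enter_coupled, my_power_down_failed, my_abort and
 * my_abort_barrier are invented names; this is a simplified sketch):
 *
 *	static int my_enter_coupled(struct cpuidle_device *dev,
 *				    struct cpuidle_driver *drv, int index)
 *	{
 *		if (my_power_down_failed(dev->cpu))
 *			my_abort = true;
 *		cpuidle_coupled_parallel_barrier(dev, &my_abort_barrier);
 *		if (my_abort)
 *			return -1;
 *		...
 *	}
 */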

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for valid state values, a negative error code otherwise:
 *  * -EINVAL if any coupled state (safe_state_index) is wrongly set.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
		-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}
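
/*
 * Worked example (editor's note): with online_count = 4,
 * all = 4 | (4 << 16) = 0x00040004, the value the counter holds when
 * every online cpu is both waiting and ready.  atomic_add_unless()
 * subtracts MAX_WAITING_CPUS (one ready count) unless the counter
 * already equals that value, in which case it returns 0 and this
 * function returns -EINVAL: once the last cpu has become ready, no
 * cpu may back out, and the caller must proceed into the coupled
 * state.
 */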

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barrier in
	 * cpuidle_coupled_set_waiting.
	 */
	smp_rmb();

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}
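
/*
 * Worked example (editor's note): states are indexed shallowest to
 * deepest, so the minimum requested index is the deepest state that
 * every online cpu can tolerate.  With coupled cpus {0, 1, 2}, cpu 2
 * offline, and requested_state = {2, 1, -}, the loop skips cpu 2 and
 * returns 1.
 */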

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the driver's safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will enter the target state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
			!cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter. Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}
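
/*
 * Editor's summary of the handshake above:
 *
 *	1. prevent loop:  idle in the safe state while hotplug is in
 *	                  progress (coupled->prevent != 0)
 *	2. waiting loop:  request a state, then idle in the safe state
 *	                  until all online coupled cpus are waiting and
 *	                  this cpu has seen at least one poke
 *	3. ready loop:    increment the ready count; if any cpu bails
 *	                  out, decrement it and go back to step 2
 *	4. enter:         all cpus call the coupled state's enter
 *	                  function at approximately the same time
 *	5. exit:          mark done, re-enable irqs, and wait for the
 *	                  ready count to drain to zero before returning
 */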

static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	call_single_data_t *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu(cpu, &dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}
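
/*
 * Example (editor's illustrative sketch): how a hypothetical platform
 * driver would register one device per cpu, after registering a driver
 * like the my_idle_driver sketch near the top of this file, so that the
 * lookup above finds and shares a single struct cpuidle_coupled.
 * my_idle_dev and my_idle_init are invented names.
 *
 *	static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);
 *
 *	static int __init my_idle_init(void)
 *	{
 *		int cpu, ret;
 *		struct cpuidle_device *dev;
 *
 *		for_each_possible_cpu(cpu) {
 *			dev = &per_cpu(my_idle_dev, cpu);
 *			dev->cpu = cpu;
 *			cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *			ret = cpuidle_register_device(dev);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */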

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
 * this was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	/* Free the shared struct only when the last reference is dropped. */
	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

static int coupled_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev;

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (dev && dev->coupled) {
		cpuidle_coupled_update_online_cpus(dev->coupled);
		cpuidle_coupled_allow_idle(dev->coupled);
	}

	mutex_unlock(&cpuidle_lock);
	return 0;
}

static int coupled_cpu_up_prepare(unsigned int cpu)
{
	struct cpuidle_device *dev;

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (dev && dev->coupled)
		cpuidle_coupled_prevent_idle(dev->coupled);

	mutex_unlock(&cpuidle_lock);
	return 0;
}
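
/*
 * Editor's note: together with cpuidle_coupled_init() below, these two
 * callbacks bracket every hotplug transition.  On the way up, the
 * CPUHP_CPUIDLE_COUPLED_PREPARE step runs coupled_cpu_up_prepare() to
 * prevent coupled idle, and the dynamic online step runs
 * coupled_cpu_online() to recount online cpus and allow it again; on
 * the way down the same pair runs in reverse.  cpu_online_mask is
 * therefore stable whenever cpus are coordinating a coupled state.
 */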

static int __init cpuidle_coupled_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE,
					"cpuidle/coupled:prepare",
					coupled_cpu_up_prepare,
					coupled_cpu_online);
	if (ret)
		return ret;
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"cpuidle/coupled:online",
					coupled_cpu_online,
					coupled_cpu_up_prepare);
	if (ret < 0)
		cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE);
	return ret;
}
core_initcall(cpuidle_coupled_init);