@@ -192,7 +192,7 @@ governors for computations related to idle state selection:
:c:member:`flags`
Flags representing idle state properties. Currently, governors only use
- the ``CPUIDLE_FLAG_POLLING`` flag which is set if the given object
+ the ``CPUIDLE_FLAG_POLLING_SOFT`` flag which is set if the given object
does not represent a real idle state, but an interface to a software
"loop" that can be used in order to avoid asking the processor to enter
any idle state at all. [There are other flags used by the ``CPUIdle``
@@ -158,7 +158,7 @@ static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
.exit_latency = 0,
.target_residency = 0,
.enter = snooze_loop,
- .flags = CPUIDLE_FLAG_POLLING },
+ .flags = CPUIDLE_FLAG_POLLING_SOFT },
};
static int powernv_cpuidle_cpu_online(unsigned int cpu)
@@ -271,7 +271,7 @@ static struct cpuidle_state dedicated_states[NR_DEDICATED_STATES] = {
.exit_latency = 0,
.target_residency = 0,
.enter = &snooze_loop,
- .flags = CPUIDLE_FLAG_POLLING },
+ .flags = CPUIDLE_FLAG_POLLING_SOFT },
{ /* CEDE */
.name = "CEDE",
.desc = "CEDE",
@@ -290,7 +290,7 @@ static struct cpuidle_state shared_states[] = {
.exit_latency = 0,
.target_residency = 0,
.enter = &snooze_loop,
- .flags = CPUIDLE_FLAG_POLLING },
+ .flags = CPUIDLE_FLAG_POLLING_SOFT },
{ /* Shared Cede */
.name = "Shared Cede",
.desc = "Shared Cede",
@@ -69,7 +69,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
int last_idx = dev->last_state_idx;
- int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
+ int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING_SOFT ? 1 : 0;
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
s64 last_residency;
@@ -133,7 +133,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
- int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
+ int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING_SOFT ? 1 : 0;
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
struct ladder_device_state *lstate;
struct cpuidle_state *state;
@@ -320,7 +320,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* it right away and keep the tick running if state[0] is a
* polling one.
*/
- *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
+ *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING_SOFT);
return 0;
}
@@ -365,7 +365,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Use a physical idle state, not busy polling, unless
* a timer is going to trigger soon enough.
*/
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING_SOFT) &&
s->exit_latency_ns <= latency_req &&
s->target_residency_ns <= data->next_timer_ns) {
predicted_ns = s->target_residency_ns;
@@ -411,7 +411,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Don't stop the tick if the selected state is a polling one or if the
* expected idle duration is shorter than the tick period length.
*/
- if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING_SOFT) ||
predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
*stop_tick = false;
@@ -492,7 +492,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* duration predictor do a better job next time.
*/
measured_ns = 9 * MAX_INTERESTING / 10;
- } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
+ } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING_SOFT) &&
dev->poll_time_limit) {
/*
* The CPU exited the "polling" state due to a time limit, so
@@ -354,7 +354,7 @@ static int teo_find_shallower_state(struct cpuidle_driver *drv,
for (i = state_idx - 1; i >= 0; i--) {
if (dev->states_usage[i].disable ||
- (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING))
+ (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING_SOFT))
continue;
state_idx = i;
@@ -426,7 +426,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* all. If state 1 is disabled, though, state 0 must be used
* anyway.
*/
- if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
+ if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING_SOFT) &&
teo_state_ok(0, drv)) || dev->states_usage[1].disable) {
idx = 0;
goto out_tick;
@@ -584,7 +584,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* the current candidate state is low enough and skip the timers
* check in that case too.
*/
- if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
+ if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING_SOFT) &&
drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS)
goto out_tick;
@@ -616,7 +616,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* one or the expected idle duration is shorter than the tick period
* length.
*/
- if ((!(drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ if ((!(drv->states[idx].flags & CPUIDLE_FLAG_POLLING_SOFT) &&
duration_ns >= TICK_NSEC) || tick_nohz_tick_stopped())
return idx;
@@ -57,6 +57,6 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
state->target_residency_ns = 0;
state->power_usage = -1;
state->enter = poll_idle;
- state->flags = CPUIDLE_FLAG_POLLING;
+ state->flags = CPUIDLE_FLAG_POLLING_SOFT;
}
EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
@@ -78,7 +78,7 @@ struct cpuidle_state {
/* Idle State Flags */
#define CPUIDLE_FLAG_NONE (0x00)
-#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
+#define CPUIDLE_FLAG_POLLING_SOFT BIT(0) /* software polling state */
#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
In order to further distinguish software and hardware TIF_NEED_RESCHED
polling cpuidle states, rename CPUIDLE_FLAG_POLLING to
CPUIDLE_FLAG_POLLING_SOFT before introducing CPUIDLE_FLAG_POLLING_HARD
and tag mwait users with it.

This will allow cpuidle core to manage TIF_NR_POLLING on behalf of all
kinds of TIF_NEED_RESCHED polling states while keeping a necessary
distinction for the governors between software loops polling on
TIF_NEED_RESCHED and hardware monitored writes to thread flags.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 Documentation/driver-api/pm/cpuidle.rst | 2 +-
 drivers/cpuidle/cpuidle-powernv.c       | 2 +-
 drivers/cpuidle/cpuidle-pseries.c       | 4 ++--
 drivers/cpuidle/governors/ladder.c      | 4 ++--
 drivers/cpuidle/governors/menu.c        | 8 ++++----
 drivers/cpuidle/governors/teo.c         | 8 ++++----
 drivers/cpuidle/poll_state.c            | 2 +-
 include/linux/cpuidle.h                 | 2 +-
 8 files changed, 16 insertions(+), 16 deletions(-)