mirror of https://gitee.com/openkylin/linux.git
Merge branch 'pm-cpuidle'
* pm-cpuidle:
  cpuidle: menu: Avoid computations when result will be discarded
  cpuidle: menu: Drop redundant comparison
  cpuidle: menu: Simplify checks related to the polling state
  cpuidle: poll_state: Revise loop termination condition
  cpuidle: menu: Move the latency_req == 0 special case check
  cpuidle: menu: Avoid computations for very close timers
  cpuidle: menu: Do not update last_state_idx in menu_select()
  cpuidle: menu: Get rid of first_idx from menu_select()
  cpuidle: menu: Compute first_idx when latency_req is known
  cpuidle: menu: Fix wakeup statistics updates for polling state
  cpuidle: menu: Replace data->predicted_us with local variable
  cpuidle: enter_state: Don't needlessly calculate diff time
  cpuidle: Remove unnecessary wrapper cpuidle_get_last_residency()
  intel_idle: Get rid of custom ICPU() macro
commit 41fd838cda
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -247,17 +247,17 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	if (!cpuidle_state_is_coupled(drv, index))
 		local_irq_enable();
 
-	diff = ktime_us_delta(time_end, time_start);
-	if (diff > INT_MAX)
-		diff = INT_MAX;
-
-	dev->last_residency = (int) diff;
-
 	if (entered_state >= 0) {
-		/* Update cpuidle counters */
-		/* This can be moved to within driver enter routine
+		/*
+		 * Update cpuidle counters
+		 * This can be moved to within driver enter routine,
 		 * but that results in multiple copies of same code.
 		 */
+		diff = ktime_us_delta(time_end, time_start);
+		if (diff > INT_MAX)
+			diff = INT_MAX;
+
+		dev->last_residency = (int)diff;
 		dev->states_usage[entered_state].time += dev->last_residency;
 		dev->states_usage[entered_state].usage++;
 	} else {
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -80,7 +80,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
 	last_state = &ldev->states[last_idx];
 
-	last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
+	last_residency = dev->last_residency - drv->states[last_idx].exit_latency;
 
 	/* consider promotion */
 	if (last_idx < drv->state_count - 1 &&
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -124,7 +124,6 @@ struct menu_device {
 	int		tick_wakeup;
 
 	unsigned int	next_timer_us;
-	unsigned int	predicted_us;
 	unsigned int	bucket;
 	unsigned int	correction_factor[BUCKETS];
 	unsigned int	intervals[INTERVALS];
@@ -197,10 +196,11 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
  * of points is below a threshold. If it is... then use the
  * average of these 8 points as the estimated value.
  */
-static unsigned int get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data,
+					 unsigned int predicted_us)
 {
 	int i, divisor;
-	unsigned int max, thresh, avg;
+	unsigned int min, max, thresh, avg;
 	uint64_t sum, variance;
 
 	thresh = UINT_MAX; /* Discard outliers above this value */
@@ -208,6 +208,7 @@ static unsigned int get_typical_interval(struct menu_device *data)
 again:
 
 	/* First calculate the average of past intervals */
+	min = UINT_MAX;
 	max = 0;
 	sum = 0;
 	divisor = 0;
@@ -218,8 +219,19 @@ static unsigned int get_typical_interval(struct menu_device *data)
 			divisor++;
 			if (value > max)
 				max = value;
+
+			if (value < min)
+				min = value;
 		}
 	}
+
+	/*
+	 * If the result of the computation is going to be discarded anyway,
+	 * avoid the computation altogether.
+	 */
+	if (min >= predicted_us)
+		return UINT_MAX;
+
 	if (divisor == INTERVALS)
 		avg = sum >> INTERVAL_SHIFT;
 	else
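The min bookkeeping added above pays off because of how the caller uses the return value: menu_select() only ever takes min(predicted_us, get_typical_interval(...)), so when even the shortest recorded interval is at least predicted_us the estimate cannot lower the prediction, and returning UINT_MAX early skips the averaging and variance work. A standalone userspace sketch of that early-discard pattern (hypothetical names; the plain average stands in for the kernel's full outlier/variance logic):

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 8	/* stands in for the kernel's INTERVALS */

static uint32_t typical_interval(const uint32_t *intervals, uint32_t predicted_us)
{
	uint64_t sum = 0;
	uint32_t min = UINT32_MAX;
	int i;

	/* One pass computes both the sum and the minimum. */
	for (i = 0; i < NSLOTS; i++) {
		sum += intervals[i];
		if (intervals[i] < min)
			min = intervals[i];
	}

	/*
	 * The caller takes min(predicted_us, result), so if even the
	 * shortest interval is >= predicted_us the result would be
	 * discarded: bail out before doing any more arithmetic.
	 */
	if (min >= predicted_us)
		return UINT32_MAX;

	return (uint32_t)(sum / NSLOTS);	/* stand-in for the full estimate */
}

int main(void)
{
	uint32_t samples[NSLOTS] = { 50, 60, 55, 52, 58, 61, 49, 57 };

	printf("%u\n", typical_interval(samples, 40));	/* 4294967295: discarded */
	printf("%u\n", typical_interval(samples, 500));	/* 55: can lower the prediction */
	return 0;
}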
@@ -286,10 +298,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	struct menu_device *data = this_cpu_ptr(&menu_devices);
 	int latency_req = cpuidle_governor_latency_req(dev->cpu);
 	int i;
-	int first_idx;
 	int idx;
 	unsigned int interactivity_req;
-	unsigned int expected_interval;
+	unsigned int predicted_us;
 	unsigned long nr_iowaiters, cpu_load;
 	ktime_t delta_next;
 
@@ -298,50 +309,36 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		data->needs_update = 0;
 	}
 
-	/* Special case when user has set very strict latency requirement */
-	if (unlikely(latency_req == 0)) {
-		*stop_tick = false;
-		return 0;
-	}
-
 	/* determine the expected residency time, round up */
 	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
 
 	get_iowait_load(&nr_iowaiters, &cpu_load);
 	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
 
+	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
+	    ((data->next_timer_us < drv->states[1].target_residency ||
+	      latency_req < drv->states[1].exit_latency) &&
+	     !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+		/*
+		 * In this case state[0] will be used no matter what, so return
+		 * it right away and keep the tick running.
+		 */
+		*stop_tick = false;
+		return 0;
+	}
+
 	/*
 	 * Force the result of multiplication to be 64 bits even if both
 	 * operands are 32 bits.
 	 * Make sure to round up for half microseconds.
 	 */
-	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
+	predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
 					 data->correction_factor[data->bucket],
 					 RESOLUTION * DECAY);
 
-	expected_interval = get_typical_interval(data);
-	expected_interval = min(expected_interval, data->next_timer_us);
-
-	first_idx = 0;
-	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
-		struct cpuidle_state *s = &drv->states[1];
-		unsigned int polling_threshold;
-
-		/*
-		 * Default to a physical idle state, not to busy polling, unless
-		 * a timer is going to trigger really really soon.
-		 */
-		polling_threshold = max_t(unsigned int, 20, s->target_residency);
-		if (data->next_timer_us > polling_threshold &&
-		    latency_req > s->exit_latency && !s->disabled &&
-		    !dev->states_usage[1].disable)
-			first_idx = 1;
-	}
-
 	/*
 	 * Use the lowest expected idle interval to pick the idle state.
 	 */
-	data->predicted_us = min(data->predicted_us, expected_interval);
+	predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
 
 	if (tick_nohz_tick_stopped()) {
 		/*
@@ -352,34 +349,46 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		 * the known time till the closest timer event for the idle
 		 * state selection.
 		 */
-		if (data->predicted_us < TICK_USEC)
-			data->predicted_us = ktime_to_us(delta_next);
+		if (predicted_us < TICK_USEC)
+			predicted_us = ktime_to_us(delta_next);
 	} else {
 		/*
 		 * Use the performance multiplier and the user-configurable
 		 * latency_req to determine the maximum exit latency.
 		 */
-		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+		interactivity_req = predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
 		if (latency_req > interactivity_req)
 			latency_req = interactivity_req;
 	}
 
-	expected_interval = data->predicted_us;
 	/*
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
 	idx = -1;
-	for (i = first_idx; i < drv->state_count; i++) {
+	for (i = 0; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
 		struct cpuidle_state_usage *su = &dev->states_usage[i];
 
 		if (s->disabled || su->disable)
 			continue;
+
 		if (idx == -1)
 			idx = i; /* first enabled state */
-		if (s->target_residency > data->predicted_us) {
-			if (data->predicted_us < TICK_USEC)
+		if (s->target_residency > predicted_us) {
+			/*
+			 * Use a physical idle state, not busy polling, unless
+			 * a timer is going to trigger soon enough.
+			 */
+			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+			    s->exit_latency <= latency_req &&
+			    s->target_residency <= data->next_timer_us) {
+				predicted_us = s->target_residency;
+				idx = i;
+				break;
+			}
+
+			if (predicted_us < TICK_USEC)
 				break;
 
 			if (!tick_nohz_tick_stopped()) {
@@ -389,7 +398,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 				 * tick in that case and let the governor run
 				 * again in the next iteration of the loop.
 				 */
-				expected_interval = drv->states[idx].target_residency;
+				predicted_us = drv->states[idx].target_residency;
 				break;
 			}
 
@@ -403,7 +412,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 			    s->target_residency <= ktime_to_us(delta_next))
 				idx = i;
 
-			goto out;
+			return idx;
 		}
 		if (s->exit_latency > latency_req) {
 			/*
@@ -412,7 +421,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 			 * expected idle duration so that the tick is retained
 			 * as long as that target residency is low enough.
 			 */
-			expected_interval = drv->states[idx].target_residency;
+			predicted_us = drv->states[idx].target_residency;
 			break;
 		}
 		idx = i;
@@ -426,7 +435,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	 * expected idle duration is shorter than the tick period length.
 	 */
 	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-	     expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
+	     predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
 		unsigned int delta_next_us = ktime_to_us(delta_next);
 
 		*stop_tick = false;
@@ -450,10 +459,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		}
 	}
 
-out:
-	data->last_state_idx = idx;
-
-	return data->last_state_idx;
+	return idx;
 }
 
 /**
@@ -512,9 +518,19 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 		 * duration predictor do a better job next time.
 		 */
 		measured_us = 9 * MAX_INTERESTING / 10;
+	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
+		   dev->poll_time_limit) {
+		/*
+		 * The CPU exited the "polling" state due to a time limit, so
+		 * the idle duration prediction leading to the selection of that
+		 * state was inaccurate. If a better prediction had been made,
+		 * the CPU might have been woken up from idle by the next timer.
+		 * Assume that to be the case.
+		 */
+		measured_us = data->next_timer_us;
 	} else {
 		/* measured value */
-		measured_us = cpuidle_get_last_residency(dev);
+		measured_us = dev->last_residency;
 
 		/* Deduct exit latency */
 		if (measured_us > 2 * target->exit_latency)
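The new else-if branch above changes what the governor learns from a poll-limited wakeup: hitting the time limit means the prediction that picked the polling state was too low, so the update pretends the CPU slept until the next timer instead of recording the meaningless poll duration. A simplified standalone sketch of that three-way choice (the struct and constant are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define MAX_INTERESTING 50000	/* illustrative cap on residencies worth tracking, in us */

struct wakeup_info {
	bool data_invalid;		/* residency measurement unusable */
	bool was_polling;		/* last state was the busy-poll state */
	bool poll_time_limit;		/* poll loop ended on its time limit */
	unsigned int last_residency_us;
	unsigned int next_timer_us;
};

/* Mirror of the measured_us selection in menu_update(), simplified. */
static unsigned int measured_us(const struct wakeup_info *w)
{
	if (w->data_invalid)
		return 9 * MAX_INTERESTING / 10;	/* teach the predictor a large value */

	if (w->was_polling && w->poll_time_limit)
		/*
		 * The poll ended only because time ran out, so the
		 * prediction that chose polling was too low; assume the
		 * CPU would otherwise have slept until the next timer.
		 */
		return w->next_timer_us;

	return w->last_residency_us;	/* genuine measured value */
}

int main(void)
{
	struct wakeup_info w = {
		.was_polling = true, .poll_time_limit = true,
		.last_residency_us = 120, .next_timer_us = 3000,
	};

	printf("%u\n", measured_us(&w));	/* 3000, not 120 */
	return 0;
}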
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -9,7 +9,6 @@
 #include <linux/sched/clock.h>
 #include <linux/sched/idle.h>
 
-#define POLL_IDLE_TIME_LIMIT	(TICK_NSEC / 16)
 #define POLL_IDLE_RELAX_COUNT	200
 
 static int __cpuidle poll_idle(struct cpuidle_device *dev,
@@ -17,8 +16,11 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
 {
 	u64 time_start = local_clock();
 
+	dev->poll_time_limit = false;
+
 	local_irq_enable();
 	if (!current_set_polling_and_test()) {
+		u64 limit = (u64)drv->states[1].target_residency * NSEC_PER_USEC;
 		unsigned int loop_count = 0;
 
 		while (!need_resched()) {
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
loop_count = 0;
|
loop_count = 0;
|
||||||
if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT)
|
if (local_clock() - time_start > limit) {
|
||||||
|
dev->poll_time_limit = true;
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
current_clr_polling();
|
current_clr_polling();
|
||||||
|
|
|
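With the fixed POLL_IDLE_TIME_LIMIT gone, the cutoff above scales with the hardware: polling is abandoned once it has run for the target residency of the next idle state (state 1), since past that point the deeper state would have been the better choice, and dev->poll_time_limit tells the governor that the limit, not real work, ended the poll. A rough userspace analogue of the bounded loop (CLOCK_MONOTONIC in place of local_clock(), a stub for need_resched(); names are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000ULL

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* stand-in for local_clock() */
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static bool work_pending(void)
{
	return false;	/* stand-in for need_resched() */
}

/*
 * Bounded busy-poll: spin until work arrives or until we have burned
 * as much time as the next-deeper idle state would need to break even
 * (its target residency). The return value plays the role that
 * dev->poll_time_limit plays for the menu governor.
 */
static bool poll_idle_sketch(unsigned int next_state_target_residency_us)
{
	uint64_t start = now_ns();
	uint64_t limit = (uint64_t)next_state_target_residency_us * NSEC_PER_USEC;

	while (!work_pending()) {
		if (now_ns() - start > limit)
			return true;	/* gave up on the time limit */
	}
	return false;	/* real wakeup: work became pending */
}

int main(void)
{
	/* With a 100us budget and no work, the limit fires. */
	printf("time limit hit: %d\n", poll_idle_sketch(100));
	return 0;
}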
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1066,46 +1066,43 @@ static const struct idle_cpu idle_cpu_dnv = {
 	.disable_promotion_to_c1e = true,
 };
 
-#define ICPU(model, cpu) \
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
-
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
-	ICPU(INTEL_FAM6_NEHALEM_EP,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_NEHALEM,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_NEHALEM_G,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_WESTMERE,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_WESTMERE_EP,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_NEHALEM_EX,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_ATOM_PINEVIEW,		idle_cpu_atom),
-	ICPU(INTEL_FAM6_ATOM_LINCROFT,		idle_cpu_lincroft),
-	ICPU(INTEL_FAM6_WESTMERE_EX,		idle_cpu_nehalem),
-	ICPU(INTEL_FAM6_SANDYBRIDGE,		idle_cpu_snb),
-	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		idle_cpu_snb),
-	ICPU(INTEL_FAM6_ATOM_CEDARVIEW,		idle_cpu_atom),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	idle_cpu_byt),
-	ICPU(INTEL_FAM6_ATOM_MERRIFIELD,	idle_cpu_tangier),
-	ICPU(INTEL_FAM6_ATOM_AIRMONT,		idle_cpu_cht),
-	ICPU(INTEL_FAM6_IVYBRIDGE,		idle_cpu_ivb),
-	ICPU(INTEL_FAM6_IVYBRIDGE_X,		idle_cpu_ivt),
-	ICPU(INTEL_FAM6_HASWELL_CORE,		idle_cpu_hsw),
-	ICPU(INTEL_FAM6_HASWELL_X,		idle_cpu_hsw),
-	ICPU(INTEL_FAM6_HASWELL_ULT,		idle_cpu_hsw),
-	ICPU(INTEL_FAM6_HASWELL_GT3E,		idle_cpu_hsw),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT2,	idle_cpu_avn),
-	ICPU(INTEL_FAM6_BROADWELL_CORE,		idle_cpu_bdw),
-	ICPU(INTEL_FAM6_BROADWELL_GT3E,		idle_cpu_bdw),
-	ICPU(INTEL_FAM6_BROADWELL_X,		idle_cpu_bdw),
-	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	idle_cpu_bdw),
-	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		idle_cpu_skl),
-	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	idle_cpu_skl),
-	ICPU(INTEL_FAM6_KABYLAKE_MOBILE,	idle_cpu_skl),
-	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP,	idle_cpu_skl),
-	ICPU(INTEL_FAM6_SKYLAKE_X,		idle_cpu_skx),
-	ICPU(INTEL_FAM6_XEON_PHI_KNL,		idle_cpu_knl),
-	ICPU(INTEL_FAM6_XEON_PHI_KNM,		idle_cpu_knl),
-	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		idle_cpu_bxt),
-	ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE,	idle_cpu_bxt),
-	ICPU(INTEL_FAM6_ATOM_DENVERTON,		idle_cpu_dnv),
+	INTEL_CPU_FAM6(NEHALEM_EP,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(NEHALEM,			idle_cpu_nehalem),
+	INTEL_CPU_FAM6(NEHALEM_G,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(WESTMERE,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(WESTMERE_EP,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(NEHALEM_EX,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(ATOM_PINEVIEW,		idle_cpu_atom),
+	INTEL_CPU_FAM6(ATOM_LINCROFT,		idle_cpu_lincroft),
+	INTEL_CPU_FAM6(WESTMERE_EX,		idle_cpu_nehalem),
+	INTEL_CPU_FAM6(SANDYBRIDGE,		idle_cpu_snb),
+	INTEL_CPU_FAM6(SANDYBRIDGE_X,		idle_cpu_snb),
+	INTEL_CPU_FAM6(ATOM_CEDARVIEW,		idle_cpu_atom),
+	INTEL_CPU_FAM6(ATOM_SILVERMONT1,	idle_cpu_byt),
+	INTEL_CPU_FAM6(ATOM_MERRIFIELD,		idle_cpu_tangier),
+	INTEL_CPU_FAM6(ATOM_AIRMONT,		idle_cpu_cht),
+	INTEL_CPU_FAM6(IVYBRIDGE,		idle_cpu_ivb),
+	INTEL_CPU_FAM6(IVYBRIDGE_X,		idle_cpu_ivt),
+	INTEL_CPU_FAM6(HASWELL_CORE,		idle_cpu_hsw),
+	INTEL_CPU_FAM6(HASWELL_X,		idle_cpu_hsw),
+	INTEL_CPU_FAM6(HASWELL_ULT,		idle_cpu_hsw),
+	INTEL_CPU_FAM6(HASWELL_GT3E,		idle_cpu_hsw),
+	INTEL_CPU_FAM6(ATOM_SILVERMONT2,	idle_cpu_avn),
+	INTEL_CPU_FAM6(BROADWELL_CORE,		idle_cpu_bdw),
+	INTEL_CPU_FAM6(BROADWELL_GT3E,		idle_cpu_bdw),
+	INTEL_CPU_FAM6(BROADWELL_X,		idle_cpu_bdw),
+	INTEL_CPU_FAM6(BROADWELL_XEON_D,	idle_cpu_bdw),
+	INTEL_CPU_FAM6(SKYLAKE_MOBILE,		idle_cpu_skl),
+	INTEL_CPU_FAM6(SKYLAKE_DESKTOP,		idle_cpu_skl),
+	INTEL_CPU_FAM6(KABYLAKE_MOBILE,		idle_cpu_skl),
+	INTEL_CPU_FAM6(KABYLAKE_DESKTOP,	idle_cpu_skl),
+	INTEL_CPU_FAM6(SKYLAKE_X,		idle_cpu_skx),
+	INTEL_CPU_FAM6(XEON_PHI_KNL,		idle_cpu_knl),
+	INTEL_CPU_FAM6(XEON_PHI_KNM,		idle_cpu_knl),
+	INTEL_CPU_FAM6(ATOM_GOLDMONT,		idle_cpu_bxt),
+	INTEL_CPU_FAM6(ATOM_GEMINI_LAKE,	idle_cpu_bxt),
+	INTEL_CPU_FAM6(ATOM_DENVERTON,		idle_cpu_dnv),
 	{}
 };
 
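INTEL_CPU_FAM6() is the shared match helper from arch/x86/include/asm/intel-family.h, so intel_idle no longer needs a private ICPU() wrapper, and the table entries drop the INTEL_FAM6_ prefix because the helper pastes it back on. From memory, the helper of that era looked roughly like the sketch below; treat it as an approximation of the header, not a verbatim quote:

/* Approximate shape of the helpers in <asm/intel-family.h> (not verbatim). */
#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data)	\
{								\
	.vendor		= X86_VENDOR_INTEL,			\
	.family		= _family,				\
	.model		= _model,				\
	.feature	= X86_FEATURE_ANY,			\
	.driver_data	= (kernel_ulong_t)&(_driver_data)	\
}

#define INTEL_CPU_FAM6(_model, _driver_data)			\
	INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)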
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -81,6 +81,7 @@ struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;
 	unsigned int		use_deepest_state:1;
+	unsigned int		poll_time_limit:1;
 	unsigned int		cpu;
 
 	int			last_residency;
@@ -99,16 +100,6 @@
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
 DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 
-/**
- * cpuidle_get_last_residency - retrieves the last state's residency time
- * @dev: the target CPU
- */
-static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
-{
-	return dev->last_residency;
-}
-
-
 /****************************
  * CPUIDLE DRIVER INTERFACE *
  ****************************/