sched/idle, PPC: Remove redundant cpuidle_idle_call()
The core idle loop now takes care of it. However a few things need checking:

- Invocation of cpuidle_idle_call() in pseries_lpar_idle() happened
  through arch_cpu_idle() and was therefore always preceded by a call to
  ppc64_runlatch_off(). To preserve this property now that
  cpuidle_idle_call() is invoked directly from core code, a call to
  ppc64_runlatch_off() has been added to idle_loop_prolog() in
  platforms/pseries/processor_idle.c.

- Similarly, cpuidle_idle_call() was followed by ppc64_runlatch_on(), so
  a call to the latter has been added to idle_loop_epilog().

- And since arch_cpu_idle() always made sure to re-enable IRQs if they
  were not enabled, this is now done in idle_loop_epilog() as well.

The above was made in order to keep the execution flow close to the
original. I don't know if that was strictly necessary. Someone well
acquainted with the platform details might find some room for possible
optimizations.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-sh@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Russell King <linux@arm.linux.org.uk>
Cc: linaro-kernel@lists.linaro.org
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-47o4m03citrfg9y1vxic5asb@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit d8c6ad3184
parent ad68cc7a77
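For orientation, here is a heavily abridged sketch of the bracketing the message describes, before and after the patch. It is not the verbatim kernel source: the fallback path, the HMT priority handling and the ppc_md.power_save NULL check are omitted.

/* Before: cpuidle_idle_call() was reached through the arch idle hook,
 * which already bracketed it with the runlatch and IRQ handling
 * (abridged sketch, not the exact source).                            */
void arch_cpu_idle(void)
{
        ppc64_runlatch_off();
        ppc_md.power_save();            /* -> pseries_lpar_idle() -> cpuidle_idle_call() */
        if (irqs_disabled())            /* some power_save paths return with IRQs off */
                local_irq_enable();
        ppc64_runlatch_on();
}

/* After: the generic idle loop invokes cpuidle_idle_call() directly, so the
 * pseries cpuidle driver restores the same bracketing around each state
 * entry in idle_loop_prolog()/idle_loop_epilog(), as the hunks below show. */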
@@ -39,7 +39,6 @@
 #include <linux/irq.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
-#include <linux/cpuidle.h>
 #include <linux/of.h>
 #include <linux/kexec.h>
@@ -356,12 +355,8 @@ early_initcall(alloc_dispatch_log_kmem_cache);
 
 static void pseries_lpar_idle(void)
 {
-        /* This would call on the cpuidle framework, and the back-end pseries
-         * driver to go to idle states
-         */
-        if (cpuidle_idle_call()) {
-                /* On error, execute default handler
-         * to go into low thread priority and possibly
+        /*
+         * Default handler to go into low thread priority and possibly
          * low power mode by cedeing processor to hypervisor
          */
 
@@ -379,7 +374,6 @@ static void pseries_lpar_idle(void)
 
         get_lppaca()->idle = 0;
-        }
 }
 
 /*
  * Enable relocation on during exceptions. This has partition wide scope and
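The net effect of the two hunks above is that the default cede path, which previously ran only when cpuidle_idle_call() returned an error, now always runs. Roughly (a sketch assembled from the hunks; the middle of the function is untouched by the patch and elided here, as it is in the diff):

static void pseries_lpar_idle(void)
{
        /*
         * Default handler to go into low thread priority and possibly
         * low power mode by cedeing processor to hypervisor
         */

        /* ... unchanged body elided, as in the context skipped between hunks ... */

        get_lppaca()->idle = 0;
}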
@@ -29,6 +29,7 @@ static struct cpuidle_state *cpuidle_state_table;
 
 static inline void idle_loop_prolog(unsigned long *in_purr)
 {
+        ppc64_runlatch_off();
         *in_purr = mfspr(SPRN_PURR);
         /*
          * Indicate to the HV that we are idle. Now would be
@@ -45,6 +46,10 @@ static inline void idle_loop_epilog(unsigned long in_purr)
         wait_cycles += mfspr(SPRN_PURR) - in_purr;
         get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
         get_lppaca()->idle = 0;
+
+        if (irqs_disabled())
+                local_irq_enable();
+        ppc64_runlatch_on();
 }
 
 static int snooze_loop(struct cpuidle_device *dev,
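To see where the added prolog/epilog code actually takes effect: the driver's cpuidle state-enter callbacks (snooze_loop() and the other state loops, whose signature appears in the trailing context above) bracket their wait with these helpers. A hedged sketch of that shape follows; the callback name example_idle_loop and the condensed wait loop are illustrative, not the verbatim driver source.

/* Illustrative only: a pseries-style cpuidle enter callback bracketed by the
 * helpers patched above.  The name and the wait loop body are hypothetical. */
static int example_idle_loop(struct cpuidle_device *dev,
                             struct cpuidle_driver *drv,
                             int index)
{
        unsigned long in_purr;

        idle_loop_prolog(&in_purr);     /* now also turns the runlatch off */

        while (!need_resched())
                cpu_relax();            /* or cede the processor to the hypervisor */

        idle_loop_epilog(in_purr);      /* now also re-enables IRQs, runlatch back on */

        return index;
}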