mirror of https://gitee.com/openkylin/linux.git
Merge branch 'ib-omap-block-idle' into devel
commit 347ae6e291
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
                             int index)
 {
         struct omap3_idle_statedata *cx = &omap3_idle_data[index];
+        int error;
 
         if (omap_irq_pending() || need_resched())
                 goto return_sleep_time;
@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
          * Call idle CPU PM enter notifier chain so that
          * VFP context is saved.
          */
-        if (cx->mpu_state == PWRDM_POWER_OFF)
-                cpu_pm_enter();
+        if (cx->mpu_state == PWRDM_POWER_OFF) {
+                error = cpu_pm_enter();
+                if (error)
+                        goto out_clkdm_set;
+        }
 
         /* Execute ARM wfi */
         omap_sram_idle();
@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
             pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
                 cpu_pm_exit();
 
+out_clkdm_set:
         /* Re-allow idle for C1 */
         if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
                 clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
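
The change above follows one pattern throughout: cpu_pm_enter() can now fail (a notifier on the CPU PM chain may veto the transition), so the idle path skips the deep state instead of powering off with unsaved context. A minimal sketch of that pattern, with a hypothetical enter_wfi() standing in for the platform low-power call; this is an illustration, not the driver code above:

#include <linux/cpu_pm.h>

static void enter_wfi(void)
{
        /* hypothetical: the platform-specific WFI/low-power entry goes here */
}

static int example_enter_off(void)
{
        int error;

        error = cpu_pm_enter();         /* notifiers save VFP etc. and may veto */
        if (error)
                return error;           /* stay in a shallow state instead */

        enter_wfi();                    /* CPU context may be lost here */

        cpu_pm_exit();                  /* notifiers restore the saved context */
        return 0;
}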
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
         struct idle_statedata *cx = state_ptr + index;
         u32 mpuss_can_lose_context = 0;
+        int error;
 
         /*
          * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
          * Call idle CPU PM enter notifier chain so that
          * VFP and per CPU interrupt context is saved.
          */
-        cpu_pm_enter();
+        error = cpu_pm_enter();
+        if (error)
+                goto cpu_pm_out;
 
         if (dev->cpu == 0) {
                 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
                  * Call idle CPU cluster PM enter notifier chain
                  * to save GIC and wakeupgen context.
                  */
-                if (mpuss_can_lose_context)
-                        cpu_cluster_pm_enter();
+                if (mpuss_can_lose_context) {
+                        error = cpu_cluster_pm_enter();
+                        if (error)
+                                goto cpu_cluster_pm_out;
+                }
         }
 
         omap4_enter_lowpower(dev->cpu, cx->cpu_state);
         cpu_done[dev->cpu] = true;
 
+cpu_cluster_pm_out:
         /* Wakeup CPU1 only if it is not offlined */
         if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
 
@@ -197,12 +204,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
                 }
         }
 
-        /*
-         * Call idle CPU PM exit notifier chain to restore
-         * VFP and per CPU IRQ context.
-         */
-        cpu_pm_exit();
-
         /*
          * Call idle CPU cluster PM exit notifier chain
          * to restore GIC and wakeupgen context.
@@ -210,6 +211,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
         if (dev->cpu == 0 && mpuss_can_lose_context)
                 cpu_cluster_pm_exit();
 
+        /*
+         * Call idle CPU PM exit notifier chain to restore
+         * VFP and per CPU IRQ context.
+         */
+        cpu_pm_exit();
+
+cpu_pm_out:
         tick_broadcast_exit();
 
 fail:
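
Besides the same cpu_pm_enter() check, the coupled-idle hunks reorder the exit path so that cpu_cluster_pm_exit() runs before cpu_pm_exit(), the reverse of the enter order. A rough sketch of that enter/unwind ordering, assuming either notifier chain may veto the transition; illustration only, not the actual omap_enter_idle_coupled():

#include <linux/cpu_pm.h>
#include <linux/types.h>

static int example_cluster_off(bool cluster_may_lose_context)
{
        int error;

        error = cpu_pm_enter();                 /* save per-CPU context (VFP, ...) */
        if (error)
                return error;

        if (cluster_may_lose_context) {
                error = cpu_cluster_pm_enter(); /* save cluster context (GIC, ...) */
                if (error)
                        goto cpu_exit;          /* unwind the CPU-level enter */
        }

        /* ... enter the low-power state here ... */

        if (cluster_may_lose_context)
                cpu_cluster_pm_exit();          /* restore cluster context first */
cpu_exit:
        cpu_pm_exit();                          /* then restore per-CPU context */
        return error;
}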
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -191,6 +191,7 @@ void omap_sram_idle(void)
         int per_next_state = PWRDM_POWER_ON;
         int core_next_state = PWRDM_POWER_ON;
         u32 sdrc_pwr = 0;
+        int error;
 
         mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
         switch (mpu_next_state) {
@@ -219,8 +220,11 @@ void omap_sram_idle(void)
         pwrdm_pre_transition(NULL);
 
         /* PER */
-        if (per_next_state == PWRDM_POWER_OFF)
-                cpu_cluster_pm_enter();
+        if (per_next_state == PWRDM_POWER_OFF) {
+                error = cpu_cluster_pm_enter();
+                if (error)
+                        return;
+        }
 
         /* CORE */
         if (core_next_state < PWRDM_POWER_ON) {
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1102,23 +1102,13 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
 {
         struct device *dev = bank->chip.parent;
         void __iomem *base = bank->base;
-        u32 mask, nowake;
+        u32 nowake;
 
         bank->saved_datain = readl_relaxed(base + bank->regs->datain);
 
         if (!bank->enabled_non_wakeup_gpios)
                 goto update_gpio_context_count;
 
-        /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
-        mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
-        mask &= ~bank->context.risingdetect;
-        bank->saved_datain |= mask;
-
-        /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
-        mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
-        mask &= ~bank->context.fallingdetect;
-        bank->saved_datain &= ~mask;
-
         if (!may_lose_context)
                 goto update_gpio_context_count;
 
@@ -1237,26 +1227,35 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb,
 {
         struct gpio_bank *bank;
         unsigned long flags;
+        int ret = NOTIFY_OK;
+        u32 isr, mask;
 
         bank = container_of(nb, struct gpio_bank, nb);
 
         raw_spin_lock_irqsave(&bank->lock, flags);
+        if (bank->is_suspended)
+                goto out_unlock;
+
         switch (cmd) {
         case CPU_CLUSTER_PM_ENTER:
-                if (bank->is_suspended)
+                mask = omap_get_gpio_irqbank_mask(bank);
+                isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
+                if (isr) {
+                        ret = NOTIFY_BAD;
                         break;
+                }
                 omap_gpio_idle(bank, true);
                 break;
         case CPU_CLUSTER_PM_ENTER_FAILED:
         case CPU_CLUSTER_PM_EXIT:
-                if (bank->is_suspended)
-                        break;
                 omap_gpio_unidle(bank);
                 break;
         }
+
+out_unlock:
         raw_spin_unlock_irqrestore(&bank->lock, flags);
 
-        return NOTIFY_OK;
+        return ret;
 }
 
 static const struct omap_gpio_reg_offs omap2_gpio_regs = {
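
The NOTIFY_BAD return added above is what makes cpu_cluster_pm_enter() fail in the idle paths earlier in this diff: a pending GPIO interrupt vetoes the cluster transition so the interrupt is not lost while the bank goes idle. A minimal sketch of a cluster PM notifier using that mechanism; device_has_pending_irq() is a hypothetical stand-in for a driver-specific status check:

#include <linux/cpu_pm.h>
#include <linux/notifier.h>
#include <linux/types.h>

static bool device_has_pending_irq(void)
{
        /* hypothetical: a real driver would read its interrupt status register */
        return false;
}

static int example_cluster_pm_notify(struct notifier_block *nb,
                                     unsigned long cmd, void *v)
{
        switch (cmd) {
        case CPU_CLUSTER_PM_ENTER:
                if (device_has_pending_irq())
                        return NOTIFY_BAD;      /* veto idle so the IRQ gets handled */
                /* save device context for the powered-off state here */
                break;
        case CPU_CLUSTER_PM_ENTER_FAILED:
        case CPU_CLUSTER_PM_EXIT:
                /* restore device context here */
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block example_cluster_pm_nb = {
        .notifier_call = example_cluster_pm_notify,
};

/* registered from probe with: cpu_pm_register_notifier(&example_cluster_pm_nb); */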