clk: move clk_disable_unused after clk_core_disable_unprepare function

No functional change; this only moves code around. clk_disable_unused()
will need to call clk_core_prepare_enable()/clk_core_disable_unprepare()
when the CLK_OPS_PARENT_ENABLE feature is added, so move it after
clk_core_disable_unprepare() to avoid having to add forward declarations
later.
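As an aside, a minimal sketch of the forward-declaration churn this avoids;
foo() and bar() are made-up stand-ins, not kernel code:

/*
 * Hypothetical illustration: foo() plays the role of clk_disable_unused()
 * and bar() the role of clk_core_disable_unprepare().
 */
static void bar(void);	/* forward declaration, needed only because the
			 * caller foo() is defined above the callee bar() */

static void foo(void)
{
	bar();
}

static void bar(void)
{
	/* ... */
}

Moving foo() below bar() makes the forward declaration unnecessary, which
is exactly the churn this commit heads off for clk_disable_unused().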

Cc: Michael Turquette <mturquette@baylibre.com>
Cc: Stephen Boyd <sboyd@codeaurora.org>
Cc: Shawn Guo <shawnguo@kernel.org>
Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Authored by Dong Aisheng on 2016-06-30 17:31:12 +08:00; committed by Stephen Boyd
parent a6adc30ba7
commit 7ec986efed
1 changed file with 98 additions and 98 deletions


@@ -172,104 +172,6 @@ static bool clk_core_is_enabled(struct clk_core *core)
 	return core->ops->is_enabled(core->hw);
 }
 
-static void clk_unprepare_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
-
-	if (core->prepare_count)
-		return;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		return;
-
-	if (clk_core_is_prepared(core)) {
-		trace_clk_unprepare(core);
-		if (core->ops->unprepare_unused)
-			core->ops->unprepare_unused(core->hw);
-		else if (core->ops->unprepare)
-			core->ops->unprepare(core->hw);
-		trace_clk_unprepare_complete(core);
-	}
-}
-
-static void clk_disable_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-	unsigned long flags;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
-
-	flags = clk_enable_lock();
-
-	if (core->enable_count)
-		goto unlock_out;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		goto unlock_out;
-
-	/*
-	 * some gate clocks have special needs during the disable-unused
-	 * sequence. call .disable_unused if available, otherwise fall
-	 * back to .disable
-	 */
-	if (clk_core_is_enabled(core)) {
-		trace_clk_disable(core);
-		if (core->ops->disable_unused)
-			core->ops->disable_unused(core->hw);
-		else if (core->ops->disable)
-			core->ops->disable(core->hw);
-		trace_clk_disable_complete(core);
-	}
-
-unlock_out:
-	clk_enable_unlock(flags);
-}
-
-static bool clk_ignore_unused;
-static int __init clk_ignore_unused_setup(char *__unused)
-{
-	clk_ignore_unused = true;
-	return 1;
-}
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
-
-static int clk_disable_unused(void)
-{
-	struct clk_core *core;
-
-	if (clk_ignore_unused) {
-		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
-	}
-
-	clk_prepare_lock();
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	clk_prepare_unlock();
-
-	return 0;
-}
-late_initcall_sync(clk_disable_unused);
-
 /*** helper functions ***/
 
 const char *__clk_get_name(const struct clk *clk)
@@ -828,6 +730,104 @@ static void clk_core_disable_unprepare(struct clk_core *core)
 	clk_core_unprepare_lock(core);
 }
 
+static void clk_unprepare_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_unprepare_unused_subtree(child);
+
+	if (core->prepare_count)
+		return;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		return;
+
+	if (clk_core_is_prepared(core)) {
+		trace_clk_unprepare(core);
+		if (core->ops->unprepare_unused)
+			core->ops->unprepare_unused(core->hw);
+		else if (core->ops->unprepare)
+			core->ops->unprepare(core->hw);
+		trace_clk_unprepare_complete(core);
+	}
+}
+
+static void clk_disable_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+	unsigned long flags;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_disable_unused_subtree(child);
+
+	flags = clk_enable_lock();
+
+	if (core->enable_count)
+		goto unlock_out;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		goto unlock_out;
+
+	/*
+	 * some gate clocks have special needs during the disable-unused
+	 * sequence. call .disable_unused if available, otherwise fall
+	 * back to .disable
+	 */
+	if (clk_core_is_enabled(core)) {
+		trace_clk_disable(core);
+		if (core->ops->disable_unused)
+			core->ops->disable_unused(core->hw);
+		else if (core->ops->disable)
+			core->ops->disable(core->hw);
+		trace_clk_disable_complete(core);
+	}
+
+unlock_out:
+	clk_enable_unlock(flags);
+}
+
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+	clk_ignore_unused = true;
+	return 1;
+}
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
+static int clk_disable_unused(void)
+{
+	struct clk_core *core;
+
+	if (clk_ignore_unused) {
+		pr_warn("clk: Not disabling unused clocks\n");
+		return 0;
+	}
+
+	clk_prepare_lock();
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	clk_prepare_unlock();
+
+	return 0;
+}
+late_initcall_sync(clk_disable_unused);
+
 static int clk_core_round_rate_nolock(struct clk_core *core,
 				      struct clk_rate_request *req)
 {
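For context, a rough sketch of the kind of follow-up the commit message
anticipates, assuming the CLK_OPS_PARENT_ENABLE flag it mentions; the real
patch may differ in detail:

/*
 * Sketch only: with CLK_OPS_PARENT_ENABLE, a clock whose ops need the
 * parent running would bracket the disable-unused walk with the two
 * helpers this code was just moved below.
 */
static void clk_disable_unused_subtree(struct clk_core *core)
{
	/* ... recurse into children as before ... */

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	/* ... existing enable-lock/disable-unused logic ... */

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

Because clk_core_prepare_enable() and clk_core_disable_unprepare() are now
defined earlier in the file, no forward declarations will be needed when
that change lands.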