PM: sleep: core: Use built-in RCU list checking

This patch passes the cond argument to list_for_each_entry_rcu() to fix
the following false-positive lockdep warnings, observed with
CONFIG_PROVE_RCU_LIST=y:

[  330.302784] =============================
[  330.302789] WARNING: suspicious RCU usage
[  330.302796] 5.6.0-rc1+ #5 Not tainted
[  330.302801] -----------------------------
[  330.302808] drivers/base/power/main.c:326 RCU-list traversed in non-reader section!!

[  330.303303] =============================
[  330.303307] WARNING: suspicious RCU usage
[  330.303311] 5.6.0-rc1+ #5 Not tainted
[  330.303315] -----------------------------
[  330.303319] drivers/base/power/main.c:1698 RCU-list traversed in non-reader section!!

[  331.934969] =============================
[  331.934971] WARNING: suspicious RCU usage
[  331.934973] 5.6.0-rc1+ #5 Not tainted
[  331.934975] -----------------------------
[  331.934977] drivers/base/power/main.c:1238 RCU-list traversed in non-reader section!!

[  332.467772] WARNING: suspicious RCU usage
[  332.467775] 5.6.0-rc1+ #5 Not tainted
[  332.467775] -----------------------------
[  332.467778] drivers/base/power/main.c:269 RCU-list traversed in non-reader section!!
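
For reference, list_for_each_entry_rcu() accepts an optional fourth
("cond") argument describing how the traversal is protected; with
CONFIG_PROVE_RCU_LIST=y, lockdep checks that condition instead of
insisting on rcu_read_lock(). A minimal sketch of the pattern used in
this patch (do_something() is a placeholder, not part of the patch):

	struct device_link *link;
	int idx;

	idx = device_links_read_lock();	/* SRCU read side */

	/* The cond argument tells lockdep how the list is protected. */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		do_something(link);	/* placeholder */

	device_links_read_unlock(idx);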

Signed-off-by: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
---
 drivers/base/power/main.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,6 +40,10 @@
 typedef int (*pm_callback_t)(struct device *);
 
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+	list_for_each_entry_rcu(pos, head, member, \
+			device_links_read_lock_held())
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -266,7 +270,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
 	 * callbacks freeing the link objects for the links in the list we're
 	 * walking.
 	 */
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->supplier, async);
@@ -323,7 +327,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
 	 * continue instead of trying to continue in parallel with its
 	 * unregistration).
 	 */
-	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->consumer, async);
@@ -1235,7 +1239,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		link->supplier->power.must_resume = true;
 
 	device_links_read_unlock(idx);
@@ -1695,7 +1699,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
 		spin_lock_irq(&link->supplier->power.lock);
 		link->supplier->power.direct_complete = false;
 		spin_unlock_irq(&link->supplier->power.lock);
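
With the wrapper in place, every converted loop expands to the
four-argument form of list_for_each_entry_rcu(). As a sketch (not
literal preprocessor output), the loop in dpm_superior_set_must_resume()
becomes:

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		link->supplier->power.must_resume = true;

device_links_read_lock_held() reports whether the device-links SRCU
read-side lock is held, and the loops above all run between
device_links_read_lock() and device_links_read_unlock(), so these
traversals no longer trip the RCU-list lockdep check.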