of: cache phandle nodes to reduce cost of of_find_node_by_phandle()
Create a cache of the nodes that contain a phandle property. Use this cache to find the node for a given phandle value instead of scanning the devicetree to find the node. If the phandle value is not found in the cache, of_find_node_by_phandle() will fall back to the tree scan algorithm.

The cache is initialized in of_core_init().

The cache is freed via a late_initcall_sync() if modules are not enabled.

If the devicetree is created by the dtc compiler, with all phandle property values auto generated, then the size required by the cache could be 4 * (1 + number of phandles) bytes. This results in an O(1) node lookup cost for a given phandle value. Due to a concern that the phandle property values might not be consistent with what is generated by the dtc compiler, a mask has been added to the cache lookup algorithm. To maintain the O(1) node lookup cost, the size of the cache has been increased by rounding the number of entries up to the next power of two.

The overhead of finding the devicetree node containing a given phandle value has been noted by several people in the recent past, in some cases with a patch to add a hashed index of devicetree nodes, based on the phandle value of the node. One concern with this approach is the extra space added to each node. This patch takes advantage of the phandle property values auto generated by the dtc compiler, which begin at one and monotonically increase by one, resulting in a range of 1..n for n phandle values. This implementation should also provide a good reduction of overhead for any range of phandle values that are mostly in a monotonic range.

Performance measurements by Chintan Pandya <cpandya@codeaurora.org> of several implementations of patches that are similar to this one suggest an expected reduction of boot time by ~400 ms on his test system. If the cache size was decreased to 64 entries, the boot time was reduced by ~340 ms. The measurements were on a 4.9.73 kernel for arch/arm64/boot/dts/qcom/sda670-mtp.dts, which contains 2371 nodes and 814 phandle values.

Reported-by: Chintan Pandya <cpandya@codeaurora.org>
Signed-off-by: Frank Rowand <frank.rowand@sony.com>
Signed-off-by: Rob Herring <robh@kernel.org>
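As an aside, the sizing and masking arithmetic described above can be illustrated with a small standalone sketch (plain userspace C, not part of this patch). It assumes the 814 phandle values reported for sda670-mtp.dts, and the local roundup_pow_of_two() below only mimics the kernel helper of the same name.

/*
 * Illustration only (not kernel code): how the cache size and mask are
 * derived for a tree with 814 phandle values, as in the measured
 * sda670-mtp.dts. roundup_pow_of_two() here is a stand-in for the
 * kernel helper of the same name.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int phandles = 814;                               /* phandle values found in the tree */
	unsigned int cache_entries = roundup_pow_of_two(phandles); /* 1024 */
	unsigned int phandle_cache_mask = cache_entries - 1;       /* 0x3ff */

	/* a dtc-generated phandle (1..n) maps directly to one cache slot */
	printf("entries=%u mask=0x%x slot(813)=%u\n",
	       cache_entries, phandle_cache_mask, 813 & phandle_cache_mask);
	return 0;
}

With a power-of-two table, "handle & phandle_cache_mask" is a single AND, which is what keeps the lookup cost O(1).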
commit 0b3ce78e90
parent 6adb1b9538
drivers/of/base.c
@@ -91,10 +91,72 @@ int __weak of_node_to_nid(struct device_node *np)
 }
 #endif
 
+static struct device_node **phandle_cache;
+static u32 phandle_cache_mask;
+
+/*
+ * Assumptions behind phandle_cache implementation:
+ *   - phandle property values are in a contiguous range of 1..n
+ *
+ * If the assumptions do not hold, then
+ *   - the phandle lookup overhead reduction provided by the cache
+ *     will likely be less
+ */
+static void of_populate_phandle_cache(void)
+{
+	unsigned long flags;
+	u32 cache_entries;
+	struct device_node *np;
+	u32 phandles = 0;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+
+	kfree(phandle_cache);
+	phandle_cache = NULL;
+
+	for_each_of_allnodes(np)
+		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
+			phandles++;
+
+	cache_entries = roundup_pow_of_two(phandles);
+	phandle_cache_mask = cache_entries - 1;
+
+	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
+				GFP_ATOMIC);
+	if (!phandle_cache)
+		goto out;
+
+	for_each_of_allnodes(np)
+		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
+			phandle_cache[np->phandle & phandle_cache_mask] = np;
+
+out:
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+}
+
+#ifndef CONFIG_MODULES
+static int __init of_free_phandle_cache(void)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+
+	kfree(phandle_cache);
+	phandle_cache = NULL;
+
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+	return 0;
+}
+late_initcall_sync(of_free_phandle_cache);
+#endif
+
 void __init of_core_init(void)
 {
 	struct device_node *np;
 
+	of_populate_phandle_cache();
+
 	/* Create the kset, and register existing nodes */
 	mutex_lock(&of_mutex);
 	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
@@ -1021,16 +1083,32 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
  */
 struct device_node *of_find_node_by_phandle(phandle handle)
 {
-	struct device_node *np;
+	struct device_node *np = NULL;
 	unsigned long flags;
+	phandle masked_handle;
 
 	if (!handle)
 		return NULL;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
-	for_each_of_allnodes(np)
-		if (np->phandle == handle)
-			break;
+
+	masked_handle = handle & phandle_cache_mask;
+
+	if (phandle_cache) {
+		if (phandle_cache[masked_handle] &&
+		    handle == phandle_cache[masked_handle]->phandle)
+			np = phandle_cache[masked_handle];
+	}
+
+	if (!np) {
+		for_each_of_allnodes(np)
+			if (np->phandle == handle) {
+				if (phandle_cache)
+					phandle_cache[masked_handle] = np;
+				break;
+			}
+	}
+
 	of_node_get(np);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
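For context (not part of this patch), a hypothetical caller of of_find_node_by_phandle() showing the usual reference-counting pattern: the lookup takes a reference through the of_node_get() call visible above, and the caller drops it with of_node_put(). The function name example_resolve_phandle() is invented for this sketch.

#include <linux/of.h>

/* hypothetical caller: resolve a phandle, use the node, drop the reference */
static int example_resolve_phandle(phandle ph)
{
	struct device_node *np;

	np = of_find_node_by_phandle(ph);	/* O(1) when the cache hits */
	if (!np)
		return -ENODEV;

	/* ... read properties, map resources, etc. ... */

	of_node_put(np);	/* balance the of_node_get() taken by the lookup */
	return 0;
}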
drivers/of/of_private.h
@@ -132,6 +132,9 @@ extern void __of_detach_node_sysfs(struct device_node *np);
 extern void __of_sysfs_remove_bin_file(struct device_node *np,
 				       struct property *prop);
 
+/* illegal phandle value (set when unresolved) */
+#define OF_PHANDLE_ILLEGAL	0xdeadbeef
+
 /* iterators for transactions, used for overlays */
 /* forward iterator */
 #define for_each_transaction_entry(_oft, _te) \
drivers/of/resolver.c
@@ -19,9 +19,6 @@
 
 #include "of_private.h"
 
-/* illegal phandle value (set when unresolved) */
-#define OF_PHANDLE_ILLEGAL	0xdeadbeef
-
 static phandle live_tree_max_phandle(void)
 {
 	struct device_node *node;