2017-12-28 02:55:14 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0+ */
|
2012-12-07 06:55:41 +08:00
|
|
|
#ifndef _LINUX_OF_PRIVATE_H
|
|
|
|
#define _LINUX_OF_PRIVATE_H
|
|
|
|
/*
|
|
|
|
* Private symbols used by OF support code
|
|
|
|
*
|
|
|
|
* Paul Mackerras August 1996.
|
|
|
|
* Copyright (C) 1996-2005 Paul Mackerras.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* struct alias_prop - Alias property in 'aliases' node
|
|
|
|
* @link: List node to link the structure in aliases_lookup list
|
|
|
|
* @alias: Alias property name
|
|
|
|
* @np: Pointer to device_node that the alias stands for
|
|
|
|
* @id: Index value from end of alias name
|
|
|
|
* @stem: Alias string without the index
|
|
|
|
*
|
|
|
|
* The structure represents one alias property of 'aliases' node as
|
|
|
|
* an entry in aliases_lookup list.
|
|
|
|
*/
|
|
|
|
struct alias_prop {
|
|
|
|
struct list_head link;
|
|
|
|
const char *alias;
|
|
|
|
struct device_node *np;
|
|
|
|
int id;
|
|
|
|
char stem[0];
|
|
|
|
};
|
|
|
|
|
2018-08-30 06:21:45 +08:00
|
|
|
/*
 * Fallback #address-cells / #size-cells values for the root node.
 * SPARC device trees historically use two address cells; every other
 * architecture defaults to one.
 */
#if defined(CONFIG_SPARC)
#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2
#else
#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
#endif

/* Default root-node #size-cells value (one cell on all architectures). */
#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
|
|
|
|
|
2014-07-05 00:58:03 +08:00
|
|
|
/* NOTE(review): presumably serializes live-tree/sysfs modifications --
 * confirm at its definition site. */
extern struct mutex of_mutex;
/* List of struct alias_prop entries built from the '/aliases' node. */
extern struct list_head aliases_lookup;
/* Parent kset for the sysfs/kobject representation of devicetree nodes. */
extern struct kset *of_kset;
|
|
|
|
|
2014-06-26 22:40:48 +08:00
|
|
|
#if defined(CONFIG_OF_DYNAMIC)
/*
 * Notify listeners that dynamic change @action touched property @prop of
 * node @np; @old_prop carries the previous value for update actions.
 */
extern int of_property_notify(int action, struct device_node *np,
			      struct property *prop, struct property *old_prop);
/* kobject release callback for dynamically allocated device_node objects. */
extern void of_node_release(struct kobject *kobj);
/*
 * Apply/revert changeset entries and fire the corresponding notifiers.
 * NOTE(review): *ret_revert / *ret_apply look like partial-failure flags
 * telling the caller whether a rollback pass is needed -- confirm against
 * the implementation.
 */
extern int __of_changeset_apply_entries(struct of_changeset *ocs,
					int *ret_revert);
extern int __of_changeset_apply_notify(struct of_changeset *ocs);
extern int __of_changeset_revert_entries(struct of_changeset *ocs,
					 int *ret_apply);
extern int __of_changeset_revert_notify(struct of_changeset *ocs);
#else /* CONFIG_OF_DYNAMIC */
/* Without dynamic tree support, property change notification is a no-op. */
static inline int of_property_notify(int action, struct device_node *np,
				     struct property *prop, struct property *old_prop)
{
	return 0;
}
#endif /* CONFIG_OF_DYNAMIC */
|
|
|
|
|
2017-10-05 03:09:40 +08:00
|
|
|
#if defined(CONFIG_OF_KOBJ)
/* sysfs/kobject plumbing for nodes and their properties. */
int of_node_is_attached(struct device_node *node);
int __of_add_property_sysfs(struct device_node *np, struct property *pp);
void __of_remove_property_sysfs(struct device_node *np, struct property *prop);
void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
		struct property *oldprop);
int __of_attach_node_sysfs(struct device_node *np);
void __of_detach_node_sysfs(struct device_node *np);
#else
/* CONFIG_OF_KOBJ disabled: all sysfs operations become cheap no-ops. */
static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp)
{
	return 0;
}
static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {}
static inline void __of_update_property_sysfs(struct device_node *np,
		struct property *newprop, struct property *oldprop) {}
static inline int __of_attach_node_sysfs(struct device_node *np)
{
	return 0;
}
static inline void __of_detach_node_sysfs(struct device_node *np) {}
#endif
|
|
|
|
|
2017-10-18 07:36:29 +08:00
|
|
|
#if defined(CONFIG_OF_RESOLVE)
|
|
|
|
int of_resolve_phandles(struct device_node *tree);
|
|
|
|
#endif
|
|
|
|
|
2018-12-19 03:40:03 +08:00
|
|
|
#if defined(CONFIG_OF_DYNAMIC)
|
|
|
|
void __of_free_phandle_cache_entry(phandle handle);
|
|
|
|
#endif
|
|
|
|
|
2017-10-18 07:36:29 +08:00
|
|
|
#if defined(CONFIG_OF_OVERLAY)
/* Lock/unlock helpers for the overlay mutex -- NOTE(review): presumably
 * serialize overlay apply/removal; confirm at the definition site. */
void of_overlay_mutex_lock(void);
void of_overlay_mutex_unlock(void);
/* Phandle cache teardown / (re)population around overlay changes. */
int of_free_phandle_cache(void);
void of_populate_phandle_cache(void);
#else
/*
 * No-op stubs when overlays are disabled.  The stray ';' that followed
 * each '{}' body (an empty file-scope declaration, flagged by -Wpedantic
 * and checkpatch) has been removed.
 */
static inline void of_overlay_mutex_lock(void) {}
static inline void of_overlay_mutex_unlock(void) {}
#endif
|
|
|
|
|
2017-04-28 17:44:12 +08:00
|
|
|
#if defined(CONFIG_OF_UNITTEST) && defined(CONFIG_OF_OVERLAY)
/* Unittest hook: unflattens the overlay base tree during early init. */
extern void __init unittest_unflatten_overlay_base(void);
#else
/* No-op stub; the stray ';' after the '{}' body (an empty file-scope
 * declaration, flagged by -Wpedantic) has been removed. */
static inline void unittest_unflatten_overlay_base(void) {}
#endif
|
|
|
|
|
|
|
|
/*
 * Expand a flattened device tree @blob into a live tree.
 * @dad:      parent to hang the new tree under (NOTE(review): confirm
 *            whether NULL is allowed)
 * @mynodes:  out-parameter receiving the root of the unflattened tree
 * @dt_alloc: allocator callback used for the new nodes
 * @detached: whether the resulting tree is detached from the global tree
 */
extern void *__unflatten_device_tree(const void *blob,
			      struct device_node *dad,
			      struct device_node **mynodes,
			      void *(*dt_alloc)(u64 size, u64 align),
			      bool detached);
|
|
|
|
|
2014-07-05 00:58:47 +08:00
|
|
|
/**
 * General utilities for working with live trees.
 *
 * All functions with two leading underscores operate
 * without taking node references, so you either have to
 * own the devtree lock or work on detached trees only.
 */
/* Duplicate @prop, allocating with @allocflags. */
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags);
/* Duplicate @np under the name @full_name. */
struct device_node *__of_node_dup(const struct device_node *np,
				  const char *full_name);
|
2014-07-05 00:58:47 +08:00
|
|
|
|
2017-10-18 07:36:31 +08:00
|
|
|
/* Look up a child of @parent by a single path component @path. */
struct device_node *__of_find_node_by_path(struct device_node *parent,
				const char *path);
/* Look up a node by its full @path, starting the walk at @node. */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path);
|
|
|
|
|
2014-07-16 13:25:43 +08:00
|
|
|
/*
 * Lockless property accessors/mutators; per the double-underscore rule
 * above, the caller must own the devtree lock or operate on a detached
 * tree.  Each __of_*_sysfs() companion mirrors the change into sysfs.
 */
extern const void *__of_get_property(const struct device_node *np,
				     const char *name, int *lenp);
extern int __of_add_property(struct device_node *np, struct property *prop);
extern int __of_add_property_sysfs(struct device_node *np,
		struct property *prop);
extern int __of_remove_property(struct device_node *np, struct property *prop);
extern void __of_remove_property_sysfs(struct device_node *np,
		struct property *prop);
extern int __of_update_property(struct device_node *np,
		struct property *newprop, struct property **oldprop);
extern void __of_update_property_sysfs(struct device_node *np,
		struct property *newprop, struct property *oldprop);

/* Node attach/detach and the sysfs side of the same operations. */
extern int __of_attach_node_sysfs(struct device_node *np);
extern void __of_detach_node(struct device_node *np);
extern void __of_detach_node_sysfs(struct device_node *np);

/* Remove the sysfs bin_file backing @prop on @np. */
extern void __of_sysfs_remove_bin_file(struct device_node *np,
				       struct property *prop);
|
|
|
|
|
/*
 * Note on the phandle cache (from the commit introducing it):
 *
 * of_find_node_by_phandle() keeps a cache of the nodes that contain a
 * phandle property and consults it before falling back to a full tree
 * scan.  The cache is initialized in of_core_init() and freed via a
 * late_initcall_sync() when modules are not enabled.
 *
 * dtc auto-generates phandle values starting at 1 and increasing
 * monotonically, so a cache of about 4 * (1 + number of phandles) bytes
 * gives O(1) lookup.  Because hand-written trees may deviate from dtc's
 * scheme, lookups apply a mask, with the entry count rounded up to the
 * next power of two to preserve the O(1) cost; any mostly-monotonic
 * range of phandle values still benefits.
 *
 * Measurements by Chintan Pandya <cpandya@codeaurora.org> on a 4.9.73
 * arm64 system (qcom/sda670-mtp.dts: 2371 nodes, 814 phandle values)
 * showed ~400 ms boot-time reduction (~340 ms with a 64-entry cache).
 */
|
|
|
/* illegal phandle value (set when unresolved) */
#define OF_PHANDLE_ILLEGAL 0xdeadbeef
|
|
|
|
|
2014-07-05 00:58:49 +08:00
|
|
|
/* iterators for transactions, used for overlays */
/* forward iterator: walk @_oft->te_list front to back, cursor in @_te */
#define for_each_transaction_entry(_oft, _te) \
	list_for_each_entry(_te, &(_oft)->te_list, node)

/* reverse iterator: walk @_oft->te_list back to front, cursor in @_te */
#define for_each_transaction_entry_reverse(_oft, _te) \
	list_for_each_entry_reverse(_te, &(_oft)->te_list, node)
|
|
|
|
|
2012-12-07 06:55:41 +08:00
|
|
|
#endif /* _LINUX_OF_PRIVATE_H */
|