Driver core update for 4.14-rc1

Here is the "big" driver core update for 4.14-rc1. It's really not all
that big, the largest thing here being some firmware tests to help ensure
that that crazy api is working properly.

There's also a new uevent for when a driver is bound or unbound from a
device, fixing a hole in the driver model that's been there since the very
beginning. Many thanks to Dmitry for being persistent and pointing out how
wrong I was about this all along :)  Patches for the new uevents are
already in the systemd tree, if people want to play around with them.

Otherwise it's just a number of other small api changes and updates here,
nothing major. All of these patches have been in linux-next for a while
with no reported issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----

iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCWa1/IQ8cZ3JlZ0Brcm9h
aC5jb20ACgkQMUfUDdst+yn8jACfdQg+YXGxTExonxnyiWgoDMMSO2gAn1ETOaak
itLO5ll4b6EQ0r3pU27d
=pCYl
-----END PGP SIGNATURE-----

Merge tag 'driver-core-4.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core update from Greg KH (the pull message quotes the tag
message above).

* tag 'driver-core-4.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (28 commits)
  driver core: bus: Fix a potential double free
  Do not disable driver and bus shutdown hook when class shutdown hook is set.
  base: topology: constify attribute_group structures.
  base: Convert to using %pOF instead of full_name
  kernfs: Clarify lockdep name for kn->count
  fbdev: uvesafb: remove DRIVER_ATTR() usage
  xen: xen-pciback: remove DRIVER_ATTR() usage
  driver core: Document struct device:dma_ops
  mod_devicetable: Remove excess description from structured comment
  test_firmware: add batched firmware tests
  firmware: enable a debug print for batched requests
  firmware: define pr_fmt
  firmware: send -EINTR on signal abort on fallback mechanism
  test_firmware: add test case for SIGCHLD on sync fallback
  initcall_debug: add deferred probe times
  Input: axp20x-pek - switch to using devm_device_add_group()
  Input: synaptics_rmi4 - use devm_device_add_group() for attributes in F01
  Input: gpio_keys - use devm_device_add_group() for attributes
  driver core: add devm_device_add_group() and friends
  driver core: add device_{add|remove}_group() helpers
  ...
commit 44b1671fae
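The new "bind"/"unbind" uevents advertised above travel over the normal kernel
uevent netlink channel (see the KOBJ_BIND/KOBJ_UNBIND additions to kobject.h,
kobject_uevent.c and the driver bind/unbind paths further down). As a rough
illustration only -- this program is not part of the series -- a minimal
userspace listener that would observe the new actions on a 4.14 kernel could
look like this:

/* Sketch: minimal NETLINK_KOBJECT_UEVENT listener; not part of this series. */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[8192];
	ssize_t len;
	int fd;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		len = recv(fd, buf, sizeof(buf) - 1, 0);
		if (len <= 0)
			break;
		buf[len] = '\0';
		/* The first record is "ACTION@DEVPATH"; "bind" and "unbind"
		 * are the actions introduced by this update. */
		if (!strncmp(buf, "bind@", 5) || !strncmp(buf, "unbind@", 7))
			printf("%s\n", buf);
	}
	close(fd);
	return 0;
}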
@@ -41,8 +41,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
 {
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 
-	return sprintf(buf, "%lu\n",
-		       topology_get_cpu_scale(NULL, cpu->dev.id));
+	return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
 }
 
 static ssize_t cpu_capacity_store(struct device *dev,
@@ -96,14 +95,21 @@ subsys_initcall(register_cpu_capacity_sysctl);
 
 static u32 capacity_scale;
 static u32 *raw_capacity;
-static bool cap_parsing_failed;
+
+static int __init free_raw_capacity(void)
+{
+	kfree(raw_capacity);
+	raw_capacity = NULL;
+
+	return 0;
+}
 
 void topology_normalize_cpu_scale(void)
 {
 	u64 capacity;
 	int cpu;
 
-	if (!raw_capacity || cap_parsing_failed)
+	if (!raw_capacity)
 		return;
 
 	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
@@ -120,16 +126,16 @@ void topology_normalize_cpu_scale(void)
 	mutex_unlock(&cpu_scale_mutex);
 }
 
-int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 {
-	int ret = 1;
+	static bool cap_parsing_failed;
+	int ret;
 	u32 cpu_capacity;
 
 	if (cap_parsing_failed)
-		return !ret;
+		return false;
 
-	ret = of_property_read_u32(cpu_node,
-				   "capacity-dmips-mhz",
+	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
 	if (!ret) {
 		if (!raw_capacity) {
@@ -139,21 +145,21 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 			if (!raw_capacity) {
 				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
 				cap_parsing_failed = true;
-				return 0;
+				return false;
 			}
 		}
 		capacity_scale = max(cpu_capacity, capacity_scale);
 		raw_capacity[cpu] = cpu_capacity;
-		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
-			cpu_node->full_name, raw_capacity[cpu]);
+		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
+			cpu_node, raw_capacity[cpu]);
 	} else {
 		if (raw_capacity) {
-			pr_err("cpu_capacity: missing %s raw capacity\n",
-				cpu_node->full_name);
+			pr_err("cpu_capacity: missing %pOF raw capacity\n",
+				cpu_node);
 			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
 		}
 		cap_parsing_failed = true;
-		kfree(raw_capacity);
+		free_raw_capacity();
 	}
 
 	return !ret;
@@ -161,7 +167,6 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 
 #ifdef CONFIG_CPU_FREQ
 static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
 static void parsing_done_workfn(struct work_struct *work);
 static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
 
@@ -173,30 +178,31 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	struct cpufreq_policy *policy = data;
 	int cpu;
 
-	if (cap_parsing_failed || cap_parsing_done)
+	if (!raw_capacity)
 		return 0;
 
-	switch (val) {
-	case CPUFREQ_NOTIFY:
-		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
-				cpumask_pr_args(policy->related_cpus),
-				cpumask_pr_args(cpus_to_visit));
-		cpumask_andnot(cpus_to_visit,
-			       cpus_to_visit,
-			       policy->related_cpus);
-		for_each_cpu(cpu, policy->related_cpus) {
-			raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
-					    policy->cpuinfo.max_freq / 1000UL;
-			capacity_scale = max(raw_capacity[cpu], capacity_scale);
-		}
-		if (cpumask_empty(cpus_to_visit)) {
-			topology_normalize_cpu_scale();
-			kfree(raw_capacity);
-			pr_debug("cpu_capacity: parsing done\n");
-			cap_parsing_done = true;
-			schedule_work(&parsing_done_work);
-		}
-	}
+	if (val != CPUFREQ_NOTIFY)
+		return 0;
+
+	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+		 cpumask_pr_args(policy->related_cpus),
+		 cpumask_pr_args(cpus_to_visit));
+
+	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
+
+	for_each_cpu(cpu, policy->related_cpus) {
+		raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+				    policy->cpuinfo.max_freq / 1000UL;
+		capacity_scale = max(raw_capacity[cpu], capacity_scale);
+	}
+
+	if (cpumask_empty(cpus_to_visit)) {
+		topology_normalize_cpu_scale();
+		free_raw_capacity();
+		pr_debug("cpu_capacity: parsing done\n");
+		schedule_work(&parsing_done_work);
+	}
+
 	return 0;
 }
 
@@ -233,11 +239,5 @@ static void parsing_done_workfn(struct work_struct *work)
 }
 
 #else
-static int __init free_raw_capacity(void)
-{
-	kfree(raw_capacity);
-
-	return 0;
-}
 core_initcall(free_raw_capacity);
 #endif

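The arch-side callers of topology_parse_cpu_capacity() (the arm/arm64 DT
topology walks) are not part of this diff; the sketch below only illustrates,
with invented names, how a caller is expected to consume the new bool return
value and the capacity-dmips-mhz parsing shown above:

/* Illustrative only: loosely modelled on the arch DT topology walk;
 * parse_cpu_capacities() is an invented name, not from this series. */
static bool __init parse_cpu_capacities(void)
{
	struct device_node *cn;
	bool all_ok = true;
	int cpu;

	for_each_possible_cpu(cpu) {
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn)
			continue;

		/* true only if a capacity-dmips-mhz value was parsed */
		if (!topology_parse_cpu_capacity(cn, cpu))
			all_ok = false;

		of_node_put(cn);
	}

	/* Normalization only makes sense once every CPU reported a value;
	 * topology_normalize_cpu_scale() bails out on its own otherwise. */
	if (all_ok)
		topology_normalize_cpu_scale();

	return all_ok;
}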
@@ -126,11 +126,6 @@ extern int driver_add_groups(struct device_driver *drv,
 extern void driver_remove_groups(struct device_driver *drv,
 				 const struct attribute_group **groups);
 
-extern int device_add_groups(struct device *dev,
-			     const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
-				 const struct attribute_group **groups);
-
 extern char *make_class_name(const char *name, struct kobject *kobj);
 
 extern int devres_release_all(struct device *dev);

@@ -698,7 +698,7 @@ int bus_add_driver(struct device_driver *drv)
 
 out_unregister:
 	kobject_put(&priv->kobj);
-	kfree(drv->p);
+	/* drv->p is freed in driver_release() */
 	drv->p = NULL;
 out_put_bus:
 	bus_put(bus);

@ -1023,12 +1023,144 @@ int device_add_groups(struct device *dev, const struct attribute_group **groups)
|
|||
{
|
||||
return sysfs_create_groups(&dev->kobj, groups);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(device_add_groups);
|
||||
|
||||
void device_remove_groups(struct device *dev,
|
||||
const struct attribute_group **groups)
|
||||
{
|
||||
sysfs_remove_groups(&dev->kobj, groups);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(device_remove_groups);
|
||||
|
||||
union device_attr_group_devres {
|
||||
const struct attribute_group *group;
|
||||
const struct attribute_group **groups;
|
||||
};
|
||||
|
||||
static int devm_attr_group_match(struct device *dev, void *res, void *data)
|
||||
{
|
||||
return ((union device_attr_group_devres *)res)->group == data;
|
||||
}
|
||||
|
||||
static void devm_attr_group_remove(struct device *dev, void *res)
|
||||
{
|
||||
union device_attr_group_devres *devres = res;
|
||||
const struct attribute_group *group = devres->group;
|
||||
|
||||
dev_dbg(dev, "%s: removing group %p\n", __func__, group);
|
||||
sysfs_remove_group(&dev->kobj, group);
|
||||
}
|
||||
|
||||
static void devm_attr_groups_remove(struct device *dev, void *res)
|
||||
{
|
||||
union device_attr_group_devres *devres = res;
|
||||
const struct attribute_group **groups = devres->groups;
|
||||
|
||||
dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
|
||||
sysfs_remove_groups(&dev->kobj, groups);
|
||||
}
|
||||
|
||||
/**
|
||||
* devm_device_add_group - given a device, create a managed attribute group
|
||||
* @dev: The device to create the group for
|
||||
* @grp: The attribute group to create
|
||||
*
|
||||
* This function creates a group for the first time. It will explicitly
|
||||
* warn and error if any of the attribute files being created already exist.
|
||||
*
|
||||
* Returns 0 on success or error code on failure.
|
||||
*/
|
||||
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
|
||||
{
|
||||
union device_attr_group_devres *devres;
|
||||
int error;
|
||||
|
||||
devres = devres_alloc(devm_attr_group_remove,
|
||||
sizeof(*devres), GFP_KERNEL);
|
||||
if (!devres)
|
||||
return -ENOMEM;
|
||||
|
||||
error = sysfs_create_group(&dev->kobj, grp);
|
||||
if (error) {
|
||||
devres_free(devres);
|
||||
return error;
|
||||
}
|
||||
|
||||
devres->group = grp;
|
||||
devres_add(dev, devres);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_device_add_group);
|
||||
|
||||
/**
|
||||
* devm_device_remove_group: remove a managed group from a device
|
||||
* @dev: device to remove the group from
|
||||
* @grp: group to remove
|
||||
*
|
||||
* This function removes a group of attributes from a device. The attributes
|
||||
* previously have to have been created for this group, otherwise it will fail.
|
||||
*/
|
||||
void devm_device_remove_group(struct device *dev,
|
||||
const struct attribute_group *grp)
|
||||
{
|
||||
WARN_ON(devres_release(dev, devm_attr_group_remove,
|
||||
devm_attr_group_match,
|
||||
/* cast away const */ (void *)grp));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_device_remove_group);
|
||||
|
||||
/**
|
||||
* devm_device_add_groups - create a bunch of managed attribute groups
|
||||
* @dev: The device to create the group for
|
||||
* @groups: The attribute groups to create, NULL terminated
|
||||
*
|
||||
* This function creates a bunch of managed attribute groups. If an error
|
||||
* occurs when creating a group, all previously created groups will be
|
||||
* removed, unwinding everything back to the original state when this
|
||||
* function was called. It will explicitly warn and error if any of the
|
||||
* attribute files being created already exist.
|
||||
*
|
||||
* Returns 0 on success or error code from sysfs_create_group on failure.
|
||||
*/
|
||||
int devm_device_add_groups(struct device *dev,
|
||||
const struct attribute_group **groups)
|
||||
{
|
||||
union device_attr_group_devres *devres;
|
||||
int error;
|
||||
|
||||
devres = devres_alloc(devm_attr_groups_remove,
|
||||
sizeof(*devres), GFP_KERNEL);
|
||||
if (!devres)
|
||||
return -ENOMEM;
|
||||
|
||||
error = sysfs_create_groups(&dev->kobj, groups);
|
||||
if (error) {
|
||||
devres_free(devres);
|
||||
return error;
|
||||
}
|
||||
|
||||
devres->groups = groups;
|
||||
devres_add(dev, devres);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_device_add_groups);
|
||||
|
||||
/**
|
||||
* devm_device_remove_groups - remove a list of managed groups
|
||||
*
|
||||
* @dev: The device for the groups to be removed from
|
||||
* @groups: NULL terminated list of groups to be removed
|
||||
*
|
||||
* If groups is not NULL, remove the specified groups from the device.
|
||||
*/
|
||||
void devm_device_remove_groups(struct device *dev,
|
||||
const struct attribute_group **groups)
|
||||
{
|
||||
WARN_ON(devres_release(dev, devm_attr_groups_remove,
|
||||
devm_attr_group_match,
|
||||
/* cast away const */ (void *)groups));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_device_remove_groups);
|
||||
|
||||
static int device_add_attrs(struct device *dev)
|
||||
{
|
||||
|
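The Input conversions later in this series (gpio_keys, axp20x-pek, rmi4 F01)
show the intent of these helpers: create the attribute group in probe and let
devres remove it, so the remove path and error unwinding need no
sysfs_remove_group() calls. A condensed, hypothetical probe using the new API
might look like the following; the foo_* names are invented for illustration:

/* Hypothetical driver fragment showing devm_device_add_group(); the foo_*
 * identifiers are made up, only the devm_* call comes from this series. */
static ssize_t foo_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ok\n");		/* placeholder value */
}
static DEVICE_ATTR_RO(foo_status);

static struct attribute *foo_attrs[] = {
	&dev_attr_foo_status.attr,
	NULL,
};

static const struct attribute_group foo_attr_group = {
	.attrs = foo_attrs,
};

static int foo_probe(struct platform_device *pdev)
{
	int error;

	/* The group is removed automatically when the device is unbound,
	 * so foo_remove() and the error paths need no explicit cleanup. */
	error = devm_device_add_group(&pdev->dev, &foo_attr_group);
	if (error)
		return error;

	return 0;
}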
@@ -2664,11 +2796,12 @@ void device_shutdown(void)
 		pm_runtime_get_noresume(dev);
 		pm_runtime_barrier(dev);
 
-		if (dev->class && dev->class->shutdown) {
+		if (dev->class && dev->class->shutdown_pre) {
 			if (initcall_debug)
-				dev_info(dev, "shutdown\n");
-			dev->class->shutdown(dev);
-		} else if (dev->bus && dev->bus->shutdown) {
+				dev_info(dev, "shutdown_pre\n");
+			dev->class->shutdown_pre(dev);
+		}
+		if (dev->bus && dev->bus->shutdown) {
 			if (initcall_debug)
 				dev_info(dev, "shutdown\n");
 			dev->bus->shutdown(dev);

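With the rename above, a class hook no longer suppresses the bus and driver
shutdown callbacks; it runs before them. A hypothetical class opting into the
renamed hook -- only the .shutdown_pre member itself comes from this series,
the foo_* names are invented -- would look roughly like:

/* Illustrative class using the renamed pre-shutdown hook. */
static int foo_class_shutdown_pre(struct device *dev)
{
	/* Quiesce class-level state first; the driver core will still call
	 * dev->bus->shutdown() or dev->driver->shutdown() afterwards. */
	dev_dbg(dev, "class pre-shutdown\n");
	return 0;
}

static struct class foo_class = {
	.name		= "foo",
	.owner		= THIS_MODULE,
	.shutdown_pre	= foo_class_shutdown_pre,
};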
@ -20,6 +20,7 @@
|
|||
#include <linux/device.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/wait.h>
|
||||
|
@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
|
|||
static LIST_HEAD(deferred_probe_pending_list);
|
||||
static LIST_HEAD(deferred_probe_active_list);
|
||||
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
|
||||
static bool initcalls_done;
|
||||
|
||||
/*
|
||||
* In some cases, like suspend to RAM or hibernation, It might be reasonable
|
||||
|
@ -61,6 +63,26 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
|
|||
*/
|
||||
static bool defer_all_probes;
|
||||
|
||||
/*
|
||||
* For initcall_debug, show the deferred probes executed in late_initcall
|
||||
* processing.
|
||||
*/
|
||||
static void deferred_probe_debug(struct device *dev)
|
||||
{
|
||||
ktime_t calltime, delta, rettime;
|
||||
unsigned long long duration;
|
||||
|
||||
printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
|
||||
task_pid_nr(current));
|
||||
calltime = ktime_get();
|
||||
bus_probe_device(dev);
|
||||
rettime = ktime_get();
|
||||
delta = ktime_sub(rettime, calltime);
|
||||
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
|
||||
printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
|
||||
dev_name(dev), duration);
|
||||
}
|
||||
|
||||
/*
|
||||
* deferred_probe_work_func() - Retry probing devices in the active list.
|
||||
*/
|
||||
|
@ -106,7 +128,10 @@ static void deferred_probe_work_func(struct work_struct *work)
|
|||
device_pm_unlock();
|
||||
|
||||
dev_dbg(dev, "Retrying from deferred list\n");
|
||||
bus_probe_device(dev);
|
||||
if (initcall_debug && !initcalls_done)
|
||||
deferred_probe_debug(dev);
|
||||
else
|
||||
bus_probe_device(dev);
|
||||
|
||||
mutex_lock(&deferred_probe_mutex);
|
||||
|
||||
|
@ -215,6 +240,7 @@ static int deferred_probe_initcall(void)
|
|||
driver_deferred_probe_trigger();
|
||||
/* Sort as many dependencies as possible before exiting initcalls */
|
||||
flush_work(&deferred_probe_work);
|
||||
initcalls_done = true;
|
||||
return 0;
|
||||
}
|
||||
late_initcall(deferred_probe_initcall);
|
||||
|
@ -259,6 +285,8 @@ static void driver_bound(struct device *dev)
|
|||
if (dev->bus)
|
||||
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
|
||||
BUS_NOTIFY_BOUND_DRIVER, dev);
|
||||
|
||||
kobject_uevent(&dev->kobj, KOBJ_BIND);
|
||||
}
|
||||
|
||||
static int driver_sysfs_add(struct device *dev)
|
||||
|
@ -848,6 +876,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
|
|||
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
|
||||
BUS_NOTIFY_UNBOUND_DRIVER,
|
||||
dev);
|
||||
|
||||
kobject_uevent(&dev->kobj, KOBJ_UNBIND);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/capability.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -331,6 +333,7 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/* Returns 1 for batching firmware requests with the same name */
|
||||
static int fw_lookup_and_allocate_buf(const char *fw_name,
|
||||
struct firmware_cache *fwc,
|
||||
struct firmware_buf **buf, void *dbuf,
|
||||
|
@ -344,6 +347,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
|
|||
kref_get(&tmp->ref);
|
||||
spin_unlock(&fwc->lock);
|
||||
*buf = tmp;
|
||||
pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n");
|
||||
return 1;
|
||||
}
|
||||
tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
|
||||
|
@ -1085,9 +1089,12 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
|
|||
mutex_unlock(&fw_lock);
|
||||
}
|
||||
|
||||
if (fw_state_is_aborted(&buf->fw_st))
|
||||
retval = -EAGAIN;
|
||||
else if (buf->is_paged_buf && !buf->data)
|
||||
if (fw_state_is_aborted(&buf->fw_st)) {
|
||||
if (retval == -ERESTARTSYS)
|
||||
retval = -EINTR;
|
||||
else
|
||||
retval = -EAGAIN;
|
||||
} else if (buf->is_paged_buf && !buf->data)
|
||||
retval = -ENOMEM;
|
||||
|
||||
device_del(f_dev);
|
||||
|
|
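After this change, a sync request whose user-mode fallback wait is interrupted
by a signal fails with -EINTR rather than -EAGAIN, so a caller can separate
"aborted by a signal" from "worth retrying". A hedged sketch of a caller making
that distinction -- the firmware name and the retry policy are invented for
illustration, not taken from this series:

/* Illustrative caller; "vendor/foo.bin" and the retry policy are made up. */
static int foo_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "vendor/foo.bin", dev);
	if (err == -EINTR)
		return err;		/* fallback aborted by a signal; give up */
	if (err == -EAGAIN)
		return -EPROBE_DEFER;	/* one possible policy: try again later */
	if (err)
		return err;

	/* ... program fw->data / fw->size into the device ... */

	release_firmware(fw);
	return 0;
}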
|
@@ -105,7 +105,7 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
-static struct attribute_group topology_attr_group = {
+static const struct attribute_group topology_attr_group = {
 	.attrs = default_attrs,
 	.name = "topology"
 };

@@ -164,14 +164,7 @@ static int tpm_class_shutdown(struct device *dev)
 		chip->ops = NULL;
 		up_write(&chip->ops_sem);
 	}
-	/* Allow bus- and device-specific code to run. Note: since chip->ops
-	 * is NULL, more-specific shutdown code will not be able to issue TPM
-	 * commands.
-	 */
-	if (dev->bus && dev->bus->shutdown)
-		dev->bus->shutdown(dev);
-	else if (dev->driver && dev->driver->shutdown)
-		dev->driver->shutdown(dev);
 
 	return 0;
 }

@@ -214,7 +207,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
 	device_initialize(&chip->devs);
 
 	chip->dev.class = tpm_class;
-	chip->dev.class->shutdown = tpm_class_shutdown;
+	chip->dev.class->shutdown_pre = tpm_class_shutdown;
 	chip->dev.release = tpm_dev_release;
 	chip->dev.parent = pdev;
 	chip->dev.groups = chip->groups;

@ -827,7 +827,7 @@ static int gpio_keys_probe(struct platform_device *pdev)
|
|||
|
||||
fwnode_handle_put(child);
|
||||
|
||||
error = sysfs_create_group(&dev->kobj, &gpio_keys_attr_group);
|
||||
error = devm_device_add_group(dev, &gpio_keys_attr_group);
|
||||
if (error) {
|
||||
dev_err(dev, "Unable to export keys/switches, error: %d\n",
|
||||
error);
|
||||
|
@ -838,22 +838,11 @@ static int gpio_keys_probe(struct platform_device *pdev)
|
|||
if (error) {
|
||||
dev_err(dev, "Unable to register input device, error: %d\n",
|
||||
error);
|
||||
goto err_remove_group;
|
||||
return error;
|
||||
}
|
||||
|
||||
device_init_wakeup(dev, wakeup);
|
||||
|
||||
return 0;
|
||||
|
||||
err_remove_group:
|
||||
sysfs_remove_group(&dev->kobj, &gpio_keys_attr_group);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int gpio_keys_remove(struct platform_device *pdev)
|
||||
{
|
||||
sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -912,7 +901,6 @@ static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
|
|||
|
||||
static struct platform_driver gpio_keys_device_driver = {
|
||||
.probe = gpio_keys_probe,
|
||||
.remove = gpio_keys_remove,
|
||||
.driver = {
|
||||
.name = "gpio-keys",
|
||||
.pm = &gpio_keys_pm_ops,
|
||||
|
|
|
@ -182,13 +182,6 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void axp20x_remove_sysfs_group(void *_data)
|
||||
{
|
||||
struct device *dev = _data;
|
||||
|
||||
sysfs_remove_group(&dev->kobj, &axp20x_attribute_group);
|
||||
}
|
||||
|
||||
static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
|
@ -313,22 +306,13 @@ static int axp20x_pek_probe(struct platform_device *pdev)
|
|||
return error;
|
||||
}
|
||||
|
||||
error = sysfs_create_group(&pdev->dev.kobj, &axp20x_attribute_group);
|
||||
error = devm_device_add_group(&pdev->dev, &axp20x_attribute_group);
|
||||
if (error) {
|
||||
dev_err(&pdev->dev, "Failed to create sysfs attributes: %d\n",
|
||||
error);
|
||||
return error;
|
||||
}
|
||||
|
||||
error = devm_add_action(&pdev->dev,
|
||||
axp20x_remove_sysfs_group, &pdev->dev);
|
||||
if (error) {
|
||||
axp20x_remove_sysfs_group(&pdev->dev);
|
||||
dev_err(&pdev->dev, "Failed to add sysfs cleanup action: %d\n",
|
||||
error);
|
||||
return error;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, axp20x_pek);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -570,18 +570,14 @@ static int rmi_f01_probe(struct rmi_function *fn)
|
|||
|
||||
dev_set_drvdata(&fn->dev, f01);
|
||||
|
||||
error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
|
||||
error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
|
||||
if (error)
|
||||
dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
|
||||
dev_warn(&fn->dev,
|
||||
"Failed to create attribute group: %d\n", error);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void rmi_f01_remove(struct rmi_function *fn)
|
||||
{
|
||||
sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
|
||||
}
|
||||
|
||||
static int rmi_f01_config(struct rmi_function *fn)
|
||||
{
|
||||
struct f01_data *f01 = dev_get_drvdata(&fn->dev);
|
||||
|
@ -721,7 +717,6 @@ struct rmi_function_handler rmi_f01_handler = {
|
|||
},
|
||||
.func = 0x01,
|
||||
.probe = rmi_f01_probe,
|
||||
.remove = rmi_f01_remove,
|
||||
.config = rmi_f01_config,
|
||||
.attention = rmi_f01_attention,
|
||||
.suspend = rmi_f01_suspend,
|
||||
|
|
|
@ -1860,19 +1860,18 @@ static int uvesafb_setup(char *options)
|
|||
}
|
||||
#endif /* !MODULE */
|
||||
|
||||
static ssize_t show_v86d(struct device_driver *dev, char *buf)
|
||||
static ssize_t v86d_show(struct device_driver *dev, char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", v86d_path);
|
||||
}
|
||||
|
||||
static ssize_t store_v86d(struct device_driver *dev, const char *buf,
|
||||
static ssize_t v86d_store(struct device_driver *dev, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
strncpy(v86d_path, buf, PATH_MAX);
|
||||
return count;
|
||||
}
|
||||
|
||||
static DRIVER_ATTR(v86d, S_IRUGO | S_IWUSR, show_v86d, store_v86d);
|
||||
static DRIVER_ATTR_RW(v86d);
|
||||
|
||||
static int uvesafb_init(void)
|
||||
{
|
||||
|
|
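The uvesafb hunk above and the xen-pciback hunks below follow the same
DRIVER_ATTR() removal recipe: rename the callbacks to the
<name>_show()/<name>_store() convention and switch to
DRIVER_ATTR_RW()/DRIVER_ATTR_RO()/DRIVER_ATTR_WO(), which imply the
conventional permissions. In generic terms, with placeholder names not taken
from this diff:

/* General pattern, with a placeholder attribute called "bar".
 *
 * Before:
 *	static ssize_t show_bar(struct device_driver *drv, char *buf);
 *	static ssize_t store_bar(struct device_driver *drv, const char *buf,
 *				 size_t count);
 *	static DRIVER_ATTR(bar, S_IRUGO | S_IWUSR, show_bar, store_bar);
 *
 * After: the helpers must be named <attr>_show / <attr>_store so the
 * DRIVER_ATTR_RW() macro can find them.
 */
static ssize_t bar_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}

static ssize_t bar_store(struct device_driver *drv, const char *buf,
			 size_t count)
{
	return count;				/* placeholder: accept input */
}

static DRIVER_ATTR_RW(bar);	/* 0644, wired to bar_show/bar_store */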
|
@ -1172,8 +1172,8 @@ static int pcistub_reg_add(int domain, int bus, int slot, int func,
|
|||
return err;
|
||||
}
|
||||
|
||||
static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
static ssize_t new_slot_store(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
int domain, bus, slot, func;
|
||||
int err;
|
||||
|
@ -1189,10 +1189,10 @@ static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
|
|||
err = count;
|
||||
return err;
|
||||
}
|
||||
static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
|
||||
static DRIVER_ATTR_WO(new_slot);
|
||||
|
||||
static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
static ssize_t remove_slot_store(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
int domain, bus, slot, func;
|
||||
int err;
|
||||
|
@ -1208,9 +1208,9 @@ static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
|
|||
err = count;
|
||||
return err;
|
||||
}
|
||||
static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
|
||||
static DRIVER_ATTR_WO(remove_slot);
|
||||
|
||||
static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
|
||||
static ssize_t slots_show(struct device_driver *drv, char *buf)
|
||||
{
|
||||
struct pcistub_device_id *pci_dev_id;
|
||||
size_t count = 0;
|
||||
|
@ -1231,9 +1231,9 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
|
|||
|
||||
return count;
|
||||
}
|
||||
static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
|
||||
static DRIVER_ATTR_RO(slots);
|
||||
|
||||
static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
|
||||
static ssize_t irq_handlers_show(struct device_driver *drv, char *buf)
|
||||
{
|
||||
struct pcistub_device *psdev;
|
||||
struct xen_pcibk_dev_data *dev_data;
|
||||
|
@ -1260,11 +1260,10 @@ static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
|
|||
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
|
||||
return count;
|
||||
}
|
||||
static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
|
||||
static DRIVER_ATTR_RO(irq_handlers);
|
||||
|
||||
static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
|
||||
const char *buf,
|
||||
size_t count)
|
||||
static ssize_t irq_handler_state_store(struct device_driver *drv,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct pcistub_device *psdev;
|
||||
struct xen_pcibk_dev_data *dev_data;
|
||||
|
@ -1301,11 +1300,10 @@ static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
|
|||
err = count;
|
||||
return err;
|
||||
}
|
||||
static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
|
||||
pcistub_irq_handler_switch);
|
||||
static DRIVER_ATTR_WO(irq_handler_state);
|
||||
|
||||
static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
static ssize_t quirks_store(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
int domain, bus, slot, func, reg, size, mask;
|
||||
int err;
|
||||
|
@ -1323,7 +1321,7 @@ static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
|
|||
return err;
|
||||
}
|
||||
|
||||
static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
|
||||
static ssize_t quirks_show(struct device_driver *drv, char *buf)
|
||||
{
|
||||
int count = 0;
|
||||
unsigned long flags;
|
||||
|
@ -1366,11 +1364,10 @@ static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
|
|||
|
||||
return count;
|
||||
}
|
||||
static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
|
||||
pcistub_quirk_add);
|
||||
static DRIVER_ATTR_RW(quirks);
|
||||
|
||||
static ssize_t permissive_add(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
static ssize_t permissive_store(struct device_driver *drv, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
int domain, bus, slot, func;
|
||||
int err;
|
||||
|
@ -1431,8 +1428,7 @@ static ssize_t permissive_show(struct device_driver *drv, char *buf)
|
|||
spin_unlock_irqrestore(&pcistub_devices_lock, flags);
|
||||
return count;
|
||||
}
|
||||
static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
|
||||
permissive_add);
|
||||
static DRIVER_ATTR_RW(permissive);
|
||||
|
||||
static void pcistub_exit(void)
|
||||
{
|
||||
|
|
|
@@ -997,7 +997,7 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	if (key) {
-		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
+		lockdep_init_map(&kn->dep_map, "kn->count", key, 0);
 		kn->flags |= KERNFS_LOCKDEP;
 	}
 #endif

@@ -4,10 +4,12 @@
 #ifndef _LINUX_ARCH_TOPOLOGY_H_
 #define _LINUX_ARCH_TOPOLOGY_H_
 
+#include <linux/types.h>
+
 void topology_normalize_cpu_scale(void);
 
 struct device_node;
-int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
 
 struct sched_domain;
 unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);

@ -196,6 +196,14 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
|
|||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_file_unsafe(const char *name,
|
||||
umode_t mode, struct dentry *parent,
|
||||
void *data,
|
||||
const struct file_operations *fops)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
|
||||
struct dentry *parent, void *data,
|
||||
const struct file_operations *fops,
|
||||
|
@ -289,6 +297,14 @@ static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
|
|||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_ulong(const char *name,
|
||||
umode_t mode,
|
||||
struct dentry *parent,
|
||||
unsigned long *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u8 *value)
|
||||
|
|
|
@ -375,7 +375,7 @@ int subsys_virtual_register(struct bus_type *subsys,
|
|||
* @suspend: Used to put the device to sleep mode, usually to a low power
|
||||
* state.
|
||||
* @resume: Used to bring the device from the sleep mode.
|
||||
* @shutdown: Called at shut-down time to quiesce the device.
|
||||
* @shutdown_pre: Called at shut-down time before driver shutdown.
|
||||
* @ns_type: Callbacks so sysfs can detemine namespaces.
|
||||
* @namespace: Namespace of the device belongs to this class.
|
||||
* @pm: The default device power management operations of this class.
|
||||
|
@ -404,7 +404,7 @@ struct class {
|
|||
|
||||
int (*suspend)(struct device *dev, pm_message_t state);
|
||||
int (*resume)(struct device *dev);
|
||||
int (*shutdown)(struct device *dev);
|
||||
int (*shutdown_pre)(struct device *dev);
|
||||
|
||||
const struct kobj_ns_type_operations *ns_type;
|
||||
const void *(*namespace)(struct device *dev);
|
||||
|
@ -847,6 +847,7 @@ struct dev_links_info {
|
|||
* @msi_list: Hosts MSI descriptors
|
||||
* @msi_domain: The generic MSI domain this device is using.
|
||||
* @numa_node: NUMA node this device is close to.
|
||||
* @dma_ops: DMA mapping operations for this device.
|
||||
* @dma_mask: Dma mask (if dma'ble device).
|
||||
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
|
||||
* hardware supports 64-bit addresses for consistent allocations
|
||||
|
@ -1200,6 +1201,36 @@ struct device *device_create_with_groups(struct class *cls,
|
|||
const char *fmt, ...);
|
||||
extern void device_destroy(struct class *cls, dev_t devt);
|
||||
|
||||
extern int __must_check device_add_groups(struct device *dev,
|
||||
const struct attribute_group **groups);
|
||||
extern void device_remove_groups(struct device *dev,
|
||||
const struct attribute_group **groups);
|
||||
|
||||
static inline int __must_check device_add_group(struct device *dev,
|
||||
const struct attribute_group *grp)
|
||||
{
|
||||
const struct attribute_group *groups[] = { grp, NULL };
|
||||
|
||||
return device_add_groups(dev, groups);
|
||||
}
|
||||
|
||||
static inline void device_remove_group(struct device *dev,
|
||||
const struct attribute_group *grp)
|
||||
{
|
||||
const struct attribute_group *groups[] = { grp, NULL };
|
||||
|
||||
return device_remove_groups(dev, groups);
|
||||
}
|
||||
|
||||
extern int __must_check devm_device_add_groups(struct device *dev,
|
||||
const struct attribute_group **groups);
|
||||
extern void devm_device_remove_groups(struct device *dev,
|
||||
const struct attribute_group **groups);
|
||||
extern int __must_check devm_device_add_group(struct device *dev,
|
||||
const struct attribute_group *grp);
|
||||
extern void devm_device_remove_group(struct device *dev,
|
||||
const struct attribute_group *grp);
|
||||
|
||||
/*
|
||||
* Platform "fixup" functions - allow the platform to have their say
|
||||
* about devices and actions that the general device layer doesn't
|
||||
|
|
|
@@ -57,6 +57,8 @@ enum kobject_action {
 	KOBJ_MOVE,
 	KOBJ_ONLINE,
 	KOBJ_OFFLINE,
+	KOBJ_BIND,
+	KOBJ_UNBIND,
 	KOBJ_MAX
 };
 
@@ -674,8 +674,6 @@ struct ulpi_device_id {
  * struct fsl_mc_device_id - MC object device identifier
  * @vendor: vendor ID
  * @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
  *
  * Type of entries in the "device Id" table for MC object devices supported by
  * a MC object device driver. The last entry of the table has vendor set to 0x0

@@ -52,6 +52,8 @@ static const char *kobject_actions[] = {
 	[KOBJ_MOVE] = "move",
 	[KOBJ_ONLINE] = "online",
 	[KOBJ_OFFLINE] = "offline",
+	[KOBJ_BIND] = "bind",
+	[KOBJ_UNBIND] = "unbind",
 };
 
 static int kobject_action_type(const char *buf, size_t count,

@ -19,10 +19,85 @@
|
|||
#include <linux/miscdevice.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#define TEST_FIRMWARE_NAME "test-firmware.bin"
|
||||
#define TEST_FIRMWARE_NUM_REQS 4
|
||||
|
||||
static DEFINE_MUTEX(test_fw_mutex);
|
||||
static const struct firmware *test_firmware;
|
||||
|
||||
struct test_batched_req {
|
||||
u8 idx;
|
||||
int rc;
|
||||
bool sent;
|
||||
const struct firmware *fw;
|
||||
const char *name;
|
||||
struct completion completion;
|
||||
struct task_struct *task;
|
||||
struct device *dev;
|
||||
};
|
||||
|
||||
/**
|
||||
* test_config - represents configuration for the test for different triggers
|
||||
*
|
||||
* @name: the name of the firmware file to look for
|
||||
* @sync_direct: when the sync trigger is used if this is true
|
||||
* request_firmware_direct() will be used instead.
|
||||
* @send_uevent: whether or not to send a uevent for async requests
|
||||
* @num_requests: number of requests to try per test case. This is trigger
|
||||
* specific.
|
||||
* @reqs: stores all requests information
|
||||
* @read_fw_idx: index of thread from which we want to read firmware results
|
||||
* from through the read_fw trigger.
|
||||
* @test_result: a test may use this to collect the result from the call
|
||||
* of the request_firmware*() calls used in their tests. In order of
|
||||
* priority we always keep first any setup error. If no setup errors were
|
||||
* found then we move on to the first error encountered while running the
|
||||
* API. Note that for async calls this typically will be a successful
|
||||
* result (0) unless of course you've used bogus parameters, or the system
|
||||
* is out of memory. In the async case the callback is expected to do a
|
||||
* bit more homework to figure out what happened, unfortunately the only
|
||||
* information passed today on error is the fact that no firmware was
|
||||
* found so we can only assume -ENOENT on async calls if the firmware is
|
||||
* NULL.
|
||||
*
|
||||
* Errors you can expect:
|
||||
*
|
||||
* API specific:
|
||||
*
|
||||
* 0: success for sync, for async it means request was sent
|
||||
* -EINVAL: invalid parameters or request
|
||||
* -ENOENT: files not found
|
||||
*
|
||||
* System environment:
|
||||
*
|
||||
* -ENOMEM: memory pressure on system
|
||||
* -ENODEV: out of number of devices to test
|
||||
* -EINVAL: an unexpected error has occurred
|
||||
* @req_firmware: if @sync_direct is true this is set to
|
||||
* request_firmware_direct(), otherwise request_firmware()
|
||||
*/
|
||||
struct test_config {
|
||||
char *name;
|
||||
bool sync_direct;
|
||||
bool send_uevent;
|
||||
u8 num_requests;
|
||||
u8 read_fw_idx;
|
||||
|
||||
/*
|
||||
* These below don't belong her but we'll move them once we create
|
||||
* a struct fw_test_device and stuff the misc_dev under there later.
|
||||
*/
|
||||
struct test_batched_req *reqs;
|
||||
int test_result;
|
||||
int (*req_firmware)(const struct firmware **fw, const char *name,
|
||||
struct device *device);
|
||||
};
|
||||
|
||||
struct test_config *test_fw_config;
|
||||
|
||||
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
|
||||
size_t size, loff_t *offset)
|
||||
{
|
||||
|
@ -42,6 +117,338 @@ static const struct file_operations test_fw_fops = {
|
|||
.read = test_fw_misc_read,
|
||||
};
|
||||
|
||||
static void __test_release_all_firmware(void)
|
||||
{
|
||||
struct test_batched_req *req;
|
||||
u8 i;
|
||||
|
||||
if (!test_fw_config->reqs)
|
||||
return;
|
||||
|
||||
for (i = 0; i < test_fw_config->num_requests; i++) {
|
||||
req = &test_fw_config->reqs[i];
|
||||
if (req->fw)
|
||||
release_firmware(req->fw);
|
||||
}
|
||||
|
||||
vfree(test_fw_config->reqs);
|
||||
test_fw_config->reqs = NULL;
|
||||
}
|
||||
|
||||
static void test_release_all_firmware(void)
|
||||
{
|
||||
mutex_lock(&test_fw_mutex);
|
||||
__test_release_all_firmware();
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
}
|
||||
|
||||
|
||||
static void __test_firmware_config_free(void)
|
||||
{
|
||||
__test_release_all_firmware();
|
||||
kfree_const(test_fw_config->name);
|
||||
test_fw_config->name = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* XXX: move to kstrncpy() once merged.
|
||||
*
|
||||
* Users should use kfree_const() when freeing these.
|
||||
*/
|
||||
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
|
||||
{
|
||||
*dst = kstrndup(name, count, gfp);
|
||||
if (!*dst)
|
||||
return -ENOSPC;
|
||||
return count;
|
||||
}
|
||||
|
||||
static int __test_firmware_config_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
|
||||
strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
|
||||
test_fw_config->send_uevent = true;
|
||||
test_fw_config->sync_direct = false;
|
||||
test_fw_config->req_firmware = request_firmware;
|
||||
test_fw_config->test_result = 0;
|
||||
test_fw_config->reqs = NULL;
|
||||
|
||||
return 0;
|
||||
|
||||
out:
|
||||
__test_firmware_config_free();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t reset_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
|
||||
__test_firmware_config_free();
|
||||
|
||||
ret = __test_firmware_config_init();
|
||||
if (ret < 0) {
|
||||
ret = -ENOMEM;
|
||||
pr_err("could not alloc settings for config trigger: %d\n",
|
||||
ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
pr_info("reset\n");
|
||||
ret = count;
|
||||
|
||||
out:
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
static DEVICE_ATTR_WO(reset);
|
||||
|
||||
static ssize_t config_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
int len = 0;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
|
||||
len += snprintf(buf, PAGE_SIZE,
|
||||
"Custom trigger configuration for: %s\n",
|
||||
dev_name(dev));
|
||||
|
||||
if (test_fw_config->name)
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
"name:\t%s\n",
|
||||
test_fw_config->name);
|
||||
else
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
"name:\tEMTPY\n");
|
||||
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
"num_requests:\t%u\n", test_fw_config->num_requests);
|
||||
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
"send_uevent:\t\t%s\n",
|
||||
test_fw_config->send_uevent ?
|
||||
"FW_ACTION_HOTPLUG" :
|
||||
"FW_ACTION_NOHOTPLUG");
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
"sync_direct:\t\t%s\n",
|
||||
test_fw_config->sync_direct ? "true" : "false");
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
|
||||
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return len;
|
||||
}
|
||||
static DEVICE_ATTR_RO(config);
|
||||
|
||||
static ssize_t config_name_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
kfree_const(test_fw_config->name);
|
||||
ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
|
||||
*/
|
||||
static ssize_t config_test_show_str(char *dst,
|
||||
char *src)
|
||||
{
|
||||
int len;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
len = snprintf(dst, PAGE_SIZE, "%s\n", src);
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static int test_dev_config_update_bool(const char *buf, size_t size,
|
||||
bool *cfg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
if (strtobool(buf, cfg) < 0)
|
||||
ret = -EINVAL;
|
||||
else
|
||||
ret = size;
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
test_dev_config_show_bool(char *buf,
|
||||
bool config)
|
||||
{
|
||||
bool val;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
val = config;
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", val);
|
||||
}
|
||||
|
||||
static ssize_t test_dev_config_show_int(char *buf, int cfg)
|
||||
{
|
||||
int val;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
val = cfg;
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", val);
|
||||
}
|
||||
|
||||
static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
|
||||
{
|
||||
int ret;
|
||||
long new;
|
||||
|
||||
ret = kstrtol(buf, 10, &new);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (new > U8_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
*(u8 *)cfg = new;
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
/* Always return full write size even if we didn't consume all */
|
||||
return size;
|
||||
}
|
||||
|
||||
static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
|
||||
{
|
||||
u8 val;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
val = cfg;
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", val);
|
||||
}
|
||||
|
||||
static ssize_t config_name_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return config_test_show_str(buf, test_fw_config->name);
|
||||
}
|
||||
static DEVICE_ATTR(config_name, 0644, config_name_show, config_name_store);
|
||||
|
||||
static ssize_t config_num_requests_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int rc;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
if (test_fw_config->reqs) {
|
||||
pr_err("Must call release_all_firmware prior to changing config\n");
|
||||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
rc = test_dev_config_update_u8(buf, count,
|
||||
&test_fw_config->num_requests);
|
||||
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static ssize_t config_num_requests_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return test_dev_config_show_u8(buf, test_fw_config->num_requests);
|
||||
}
|
||||
static DEVICE_ATTR(config_num_requests, 0644, config_num_requests_show,
|
||||
config_num_requests_store);
|
||||
|
||||
static ssize_t config_sync_direct_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int rc = test_dev_config_update_bool(buf, count,
|
||||
&test_fw_config->sync_direct);
|
||||
|
||||
if (rc == count)
|
||||
test_fw_config->req_firmware = test_fw_config->sync_direct ?
|
||||
request_firmware_direct :
|
||||
request_firmware;
|
||||
return rc;
|
||||
}
|
||||
|
||||
static ssize_t config_sync_direct_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
|
||||
}
|
||||
static DEVICE_ATTR(config_sync_direct, 0644, config_sync_direct_show,
|
||||
config_sync_direct_store);
|
||||
|
||||
static ssize_t config_send_uevent_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
return test_dev_config_update_bool(buf, count,
|
||||
&test_fw_config->send_uevent);
|
||||
}
|
||||
|
||||
static ssize_t config_send_uevent_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
|
||||
}
|
||||
static DEVICE_ATTR(config_send_uevent, 0644, config_send_uevent_show,
|
||||
config_send_uevent_store);
|
||||
|
||||
static ssize_t config_read_fw_idx_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
return test_dev_config_update_u8(buf, count,
|
||||
&test_fw_config->read_fw_idx);
|
||||
}
|
||||
|
||||
static ssize_t config_read_fw_idx_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
|
||||
}
|
||||
static DEVICE_ATTR(config_read_fw_idx, 0644, config_read_fw_idx_show,
|
||||
config_read_fw_idx_store);
|
||||
|
||||
|
||||
static ssize_t trigger_request_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
|
@ -170,12 +577,301 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
|
|||
}
|
||||
static DEVICE_ATTR_WO(trigger_custom_fallback);
|
||||
|
||||
static int test_fw_run_batch_request(void *data)
|
||||
{
|
||||
struct test_batched_req *req = data;
|
||||
|
||||
if (!req) {
|
||||
test_fw_config->test_result = -EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
|
||||
if (req->rc) {
|
||||
pr_info("#%u: batched sync load failed: %d\n",
|
||||
req->idx, req->rc);
|
||||
if (!test_fw_config->test_result)
|
||||
test_fw_config->test_result = req->rc;
|
||||
} else if (req->fw) {
|
||||
req->sent = true;
|
||||
pr_info("#%u: batched sync loaded %zu\n",
|
||||
req->idx, req->fw->size);
|
||||
}
|
||||
complete(&req->completion);
|
||||
|
||||
req->task = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* We use a kthread as otherwise the kernel serializes all our sync requests
|
||||
* and we would not be able to mimic batched requests on a sync call. Batched
|
||||
* requests on a sync call can for instance happen on a device driver when
|
||||
* multiple cards are used and firmware loading happens outside of probe.
|
||||
*/
|
||||
static ssize_t trigger_batched_requests_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct test_batched_req *req;
|
||||
int rc;
|
||||
u8 i;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
|
||||
test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
|
||||
test_fw_config->num_requests * 2);
|
||||
if (!test_fw_config->reqs) {
|
||||
rc = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
pr_info("batched sync firmware loading '%s' %u times\n",
|
||||
test_fw_config->name, test_fw_config->num_requests);
|
||||
|
||||
for (i = 0; i < test_fw_config->num_requests; i++) {
|
||||
req = &test_fw_config->reqs[i];
|
||||
if (!req) {
|
||||
WARN_ON(1);
|
||||
rc = -ENOMEM;
|
||||
goto out_bail;
|
||||
}
|
||||
req->fw = NULL;
|
||||
req->idx = i;
|
||||
req->name = test_fw_config->name;
|
||||
req->dev = dev;
|
||||
init_completion(&req->completion);
|
||||
req->task = kthread_run(test_fw_run_batch_request, req,
|
||||
"%s-%u", KBUILD_MODNAME, req->idx);
|
||||
if (!req->task || IS_ERR(req->task)) {
|
||||
pr_err("Setting up thread %u failed\n", req->idx);
|
||||
req->task = NULL;
|
||||
rc = -ENOMEM;
|
||||
goto out_bail;
|
||||
}
|
||||
}
|
||||
|
||||
rc = count;
|
||||
|
||||
/*
|
||||
* We require an explicit release to enable more time and delay of
|
||||
* calling release_firmware() to improve our chances of forcing a
|
||||
* batched request. If we instead called release_firmware() right away
|
||||
* then we might miss on an opportunity of having a successful firmware
|
||||
* request pass on the opportunity to be come a batched request.
|
||||
*/
|
||||
|
||||
out_bail:
|
||||
for (i = 0; i < test_fw_config->num_requests; i++) {
|
||||
req = &test_fw_config->reqs[i];
|
||||
if (req->task || req->sent)
|
||||
wait_for_completion(&req->completion);
|
||||
}
|
||||
|
||||
/* Override any worker error if we had a general setup error */
|
||||
if (rc < 0)
|
||||
test_fw_config->test_result = rc;
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
||||
return rc;
|
||||
}
|
||||
static DEVICE_ATTR_WO(trigger_batched_requests);
|
||||
|
||||
/*
|
||||
* We wait for each callback to return with the lock held, no need to lock here
|
||||
*/
|
||||
static void trigger_batched_cb(const struct firmware *fw, void *context)
|
||||
{
|
||||
struct test_batched_req *req = context;
|
||||
|
||||
if (!req) {
|
||||
test_fw_config->test_result = -EINVAL;
|
||||
return;
|
||||
}
|
||||
|
||||
/* forces *some* batched requests to queue up */
|
||||
if (!req->idx)
|
||||
ssleep(2);
|
||||
|
||||
req->fw = fw;
|
||||
|
||||
/*
|
||||
* Unfortunately the firmware API gives us nothing other than a null FW
|
||||
* if the firmware was not found on async requests. Best we can do is
|
||||
* just assume -ENOENT. A better API would pass the actual return
|
||||
* value to the callback.
|
||||
*/
|
||||
if (!fw && !test_fw_config->test_result)
|
||||
test_fw_config->test_result = -ENOENT;
|
||||
|
||||
complete(&req->completion);
|
||||
}
|
||||
|
||||
static
|
||||
ssize_t trigger_batched_requests_async_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct test_batched_req *req;
|
||||
bool send_uevent;
|
||||
int rc;
|
||||
u8 i;
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
|
||||
test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
|
||||
test_fw_config->num_requests * 2);
|
||||
if (!test_fw_config->reqs) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
pr_info("batched loading '%s' custom fallback mechanism %u times\n",
|
||||
test_fw_config->name, test_fw_config->num_requests);
|
||||
|
||||
send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
|
||||
FW_ACTION_NOHOTPLUG;
|
||||
|
||||
for (i = 0; i < test_fw_config->num_requests; i++) {
|
||||
req = &test_fw_config->reqs[i];
|
||||
if (!req) {
|
||||
WARN_ON(1);
|
||||
goto out_bail;
|
||||
}
|
||||
req->name = test_fw_config->name;
|
||||
req->fw = NULL;
|
||||
req->idx = i;
|
||||
init_completion(&req->completion);
|
||||
rc = request_firmware_nowait(THIS_MODULE, send_uevent,
|
||||
					     req->name,
					     dev, GFP_KERNEL, req,
					     trigger_batched_cb);
		if (rc) {
			pr_info("#%u: batched async load failed setup: %d\n",
				i, rc);
			req->rc = rc;
			goto out_bail;
		} else
			req->sent = true;
	}

	rc = count;

out_bail:

	/*
	 * We require an explicit release to enable more time and delay of
	 * calling release_firmware() to improve our chances of forcing a
	 * batched request. If we instead called release_firmware() right away
	 * then we might miss on an opportunity of having a successful firmware
	 * request pass on the opportunity to become a batched request.
	 */

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);

static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);

static ssize_t release_all_firmware_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	test_release_all_firmware();
	return count;
}
static DEVICE_ATTR_WO(release_all_firmware);

static ssize_t read_firmware_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct test_batched_req *req;
	u8 idx;
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);

	idx = test_fw_config->read_fw_idx;
	if (idx >= test_fw_config->num_requests) {
		rc = -ERANGE;
		goto out;
	}

	if (!test_fw_config->reqs) {
		rc = -EINVAL;
		goto out;
	}

	req = &test_fw_config->reqs[idx];
	if (!req->fw) {
		pr_err("#%u: failed to async load firmware\n", idx);
		rc = -ENOENT;
		goto out;
	}

	pr_info("#%u: loaded %zu\n", idx, req->fw->size);

	if (req->fw->size > PAGE_SIZE) {
		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
		rc = -EINVAL;
	}
	memcpy(buf, req->fw->data, req->fw->size);

	rc = req->fw->size;
out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_RO(read_firmware);

#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
	TEST_FW_DEV_ATTR(reset),

	TEST_FW_DEV_ATTR(config),
	TEST_FW_DEV_ATTR(config_name),
	TEST_FW_DEV_ATTR(config_num_requests),
	TEST_FW_DEV_ATTR(config_sync_direct),
	TEST_FW_DEV_ATTR(config_send_uevent),
	TEST_FW_DEV_ATTR(config_read_fw_idx),

	/* These don't use the config at all - they could be ported! */
	TEST_FW_DEV_ATTR(trigger_request),
	TEST_FW_DEV_ATTR(trigger_async_request),
	TEST_FW_DEV_ATTR(trigger_custom_fallback),

	/* These use the config and can use the test_result */
	TEST_FW_DEV_ATTR(trigger_batched_requests),
	TEST_FW_DEV_ATTR(trigger_batched_requests_async),

	TEST_FW_DEV_ATTR(release_all_firmware),
	TEST_FW_DEV_ATTR(test_result),
	TEST_FW_DEV_ATTR(read_firmware),
	NULL,
};
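The attribute array above is the entire userspace surface of the new batched tests, so a quick by-hand session helps map the knobs to their C handlers. This is only a sketch: the sysfs path below is assumed from the usual location of the test_firmware misc device and is not shown in this diff; the fw_filesystem.sh hunks further down drive the same files through their $DIR variable.

	# Sketch only: adjust DIR if the misc device lands elsewhere on your system.
	DIR=/sys/devices/virtual/misc/test_firmware
	echo 1 > $DIR/reset                          # back to defaults: 4 requests, uevent on
	echo -n test-firmware.bin > $DIR/config_name # firmware name to request
	echo -n 1 > $DIR/trigger_batched_requests    # run the batched sync requests
	cat $DIR/test_result                         # 0 on success, negative errno on failure
	echo -n 0 > $DIR/config_read_fw_idx          # pick which request's blob to read back
	cat $DIR/read_firmware                       # served by read_firmware_show() above
	echo 1 > $DIR/release_all_firmware           # explicit release, per the comment in the store handler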
@@ -192,8 +888,17 @@ static int __init test_firmware_init(void)
{
	int rc;

	test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
	if (!test_fw_config)
		return -ENOMEM;

	rc = __test_firmware_config_init();
	if (rc)
		return rc;

	rc = misc_register(&test_fw_misc_device);
	if (rc) {
		kfree(test_fw_config);
		pr_err("could not register misc device: %d\n", rc);
		return rc;
	}
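For context, the hunk above registers the interface via misc_register(), so the selftests further down expect the driver to be available as a module. A minimal setup sketch; the config symbol and module name are the long-standing ones for lib/test_firmware.c and worth verifying against your tree:

	# Assumes CONFIG_TEST_FIRMWARE=m in the kernel config.
	modprobe test_firmware
	ls /sys/devices/virtual/misc/test_firmware/   # the attributes registered above should appear here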
@@ -207,8 +912,13 @@ module_init(test_firmware_init);

static void __exit test_firmware_exit(void)
{
	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	misc_deregister(&test_fw_misc_device);
	__test_firmware_config_free();
	kfree(test_fw_config);
	mutex_unlock(&test_fw_mutex);

	pr_warn("removed interface\n");
}
@@ -134,6 +134,27 @@ load_fw_custom_cancel()
	wait
}

load_fw_fallback_with_child()
{
	local name="$1"
	local file="$2"

	# This is the value already set but we want to be explicit
	echo 4 >/sys/class/firmware/timeout

	sleep 1 &
	SECONDS_BEFORE=$(date +%s)
	echo -n "$name" >"$DIR"/trigger_request 2>/dev/null
	SECONDS_AFTER=$(date +%s)
	SECONDS_DELTA=$(($SECONDS_AFTER - $SECONDS_BEFORE))
	if [ "$SECONDS_DELTA" -lt 4 ]; then
		RET=1
	else
		RET=0
	fi
	wait
	return $RET
}

trap "test_finish" EXIT
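The helper above arms a background sleep so the shell receives SIGCHLD about a second into the synchronous request, while the fallback timeout stays pinned at 4 seconds; if trigger_request returns in under 4 seconds, the in-kernel wait was cut short by the signal, which is the behaviour the "send -EINTR on signal abort" patch in this series guards against. A minimal invocation, mirroring the call added at the end of the script in the next hunk:

	set +e
	load_fw_fallback_with_child "nope-signal-$NAME" "$FW" && \
		echo "sync request survived SIGCHLD as expected"
	set -e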
@@ -221,4 +242,14 @@ else
	echo "$0: cancelling custom fallback mechanism works"
fi

set +e
load_fw_fallback_with_child "nope-signal-$NAME" "$FW"
if [ "$?" -eq 0 ]; then
	echo "$0: SIGCHLD on sync ignored as expected" >&2
else
	echo "$0: error - sync firmware request cancelled due to SIGCHLD" >&2
	exit 1
fi
set -e

exit 0
@@ -25,8 +25,9 @@ if [ ! -d $DIR ]; then
fi

# CONFIG_FW_LOADER_USER_HELPER has a sysfs class under /sys/class/firmware/
# These days no one enables CONFIG_FW_LOADER_USER_HELPER so check for that
# as an indicator for CONFIG_FW_LOADER_USER_HELPER.
# These days most distros enable CONFIG_FW_LOADER_USER_HELPER but disable
# CONFIG_FW_LOADER_USER_HELPER_FALLBACK. We use /sys/class/firmware/ as an
# indicator for CONFIG_FW_LOADER_USER_HELPER.
HAS_FW_LOADER_USER_HELPER=$(if [ -d /sys/class/firmware/ ]; then echo yes; else echo no; fi)

if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
@@ -116,4 +117,240 @@ else
	echo "$0: async filesystem loading works"
fi

### Batched requests tests
test_config_present()
{
	if [ ! -f $DIR/reset ]; then
		echo "Configuration triggers not present, ignoring test"
		exit 0
	fi
}

# Defaults :
#
# send_uevent: 1
# sync_direct: 0
# name: test-firmware.bin
# num_requests: 4
config_reset()
{
	echo 1 > $DIR/reset
}

release_all_firmware()
{
	echo 1 > $DIR/release_all_firmware
}

config_set_name()
{
	echo -n $1 > $DIR/config_name
}

config_set_sync_direct()
{
	echo 1 > $DIR/config_sync_direct
}

config_unset_sync_direct()
{
	echo 0 > $DIR/config_sync_direct
}

config_set_uevent()
{
	echo 1 > $DIR/config_send_uevent
}

config_unset_uevent()
{
	echo 0 > $DIR/config_send_uevent
}

config_trigger_sync()
{
	echo -n 1 > $DIR/trigger_batched_requests 2>/dev/null
}

config_trigger_async()
{
	echo -n 1 > $DIR/trigger_batched_requests_async 2> /dev/null
}

config_set_read_fw_idx()
{
	echo -n $1 > $DIR/config_read_fw_idx 2> /dev/null
}

read_firmwares()
{
	for i in $(seq 0 3); do
		config_set_read_fw_idx $i
		# Verify the contents are what we expect.
		# -Z required for now -- check for yourself, md5sum
		# on $FW and DIR/read_firmware will yield the same. Even
		# cmp agrees, so something is off.
		if ! diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
			echo "request #$i: firmware was not loaded" >&2
			exit 1
		fi
	done
}

read_firmwares_expect_nofile()
{
	for i in $(seq 0 3); do
		config_set_read_fw_idx $i
		# Ensures contents differ
		if diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
			echo "request $i: file was not expected to match" >&2
			exit 1
		fi
	done
}
test_batched_request_firmware_nofile()
{
	echo -n "Batched request_firmware() nofile try #$1: "
	config_reset
	config_set_name nope-test-firmware.bin
	config_trigger_sync
	read_firmwares_expect_nofile
	release_all_firmware
	echo "OK"
}

test_batched_request_firmware_direct_nofile()
{
	echo -n "Batched request_firmware_direct() nofile try #$1: "
	config_reset
	config_set_name nope-test-firmware.bin
	config_set_sync_direct
	config_trigger_sync
	release_all_firmware
	echo "OK"
}

test_request_firmware_nowait_uevent_nofile()
{
	echo -n "Batched request_firmware_nowait(uevent=true) nofile try #$1: "
	config_reset
	config_set_name nope-test-firmware.bin
	config_trigger_async
	release_all_firmware
	echo "OK"
}

test_wait_and_cancel_custom_load()
{
	if [ "$HAS_FW_LOADER_USER_HELPER" != "yes" ]; then
		return
	fi
	local timeout=10
	name=$1
	while [ ! -e "$DIR"/"$name"/loading ]; do
		sleep 0.1
		timeout=$(( $timeout - 1 ))
		if [ "$timeout" -eq 0 ]; then
			echo "firmware interface never appeared:" >&2
			echo "$DIR/$name/loading" >&2
			exit 1
		fi
	done
	echo -1 >"$DIR"/"$name"/loading
}

test_request_firmware_nowait_custom_nofile()
{
	echo -n "Batched request_firmware_nowait(uevent=false) nofile try #$1: "
	config_unset_uevent
	config_set_name nope-test-firmware.bin
	config_trigger_async &
	test_wait_and_cancel_custom_load nope-test-firmware.bin
	wait
	release_all_firmware
	echo "OK"
}

test_batched_request_firmware()
{
	echo -n "Batched request_firmware() try #$1: "
	config_reset
	config_trigger_sync
	read_firmwares
	release_all_firmware
	echo "OK"
}

test_batched_request_firmware_direct()
{
	echo -n "Batched request_firmware_direct() try #$1: "
	config_reset
	config_set_sync_direct
	config_trigger_sync
	release_all_firmware
	echo "OK"
}

test_request_firmware_nowait_uevent()
{
	echo -n "Batched request_firmware_nowait(uevent=true) try #$1: "
	config_reset
	config_trigger_async
	release_all_firmware
	echo "OK"
}

test_request_firmware_nowait_custom()
{
	echo -n "Batched request_firmware_nowait(uevent=false) try #$1: "
	config_unset_uevent
	config_trigger_async
	release_all_firmware
	echo "OK"
}
# Only continue if batched request triggers are present on the
# test-firmware driver
test_config_present

# test with the file present
echo
echo "Testing with the file present..."
for i in $(seq 1 5); do
	test_batched_request_firmware $i
done

for i in $(seq 1 5); do
	test_batched_request_firmware_direct $i
done

for i in $(seq 1 5); do
	test_request_firmware_nowait_uevent $i
done

for i in $(seq 1 5); do
	test_request_firmware_nowait_custom $i
done

# Test for file not found, errors are expected, the failure would be
# a hung task, which would require a hard reset.
echo
echo "Testing with the file missing..."
for i in $(seq 1 5); do
	test_batched_request_firmware_nofile $i
done

for i in $(seq 1 5); do
	test_batched_request_firmware_direct_nofile $i
done

for i in $(seq 1 5); do
	test_request_firmware_nowait_uevent_nofile $i
done

for i in $(seq 1 5); do
	test_request_firmware_nowait_custom_nofile $i
done

exit 0
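For completeness, both updated scripts are wired into the firmware selftest target, so a typical run from the top of the source tree looks like the following; this is a sketch of the standard kselftest invocation (run as root), not part of the diff itself:

	make -C tools/testing/selftests TARGETS=firmware run_tests
	# or run the scripts directly:
	cd tools/testing/selftests/firmware
	./fw_filesystem.sh && ./fw_fallback.sh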