Char/Misc driver patches for 4.8-rc1

Here is the big char/misc driver update for 4.8-rc1.
 
 Not a lot of stuff, but it's all over the place, full details are in the
 shortlog below.  All of these have been in linux-next with no reported
 issues for a while.
 
 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iFYEABECABYFAleVPBsPHGdyZWdAa3JvYWguY29tAAoJEDFH1A3bLfspEQgAoJOX
 nSWKA7j4JMGy1v+uNIqsgUmUAJsFyS388N+Faa2K4uyp7CYQ6jaAZw==
 =0Ofd
 -----END PGP SIGNATURE-----

Merge tag 'char-misc-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver update for 4.8-rc1.

  Not a lot of stuff, but it's all over the place, full details are in
  the shortlog.  All of these have been in linux-next with no reported
  issues for a while"

* tag 'char-misc-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (49 commits)
  lkdtm: silence warnings about function declarations
  lkdtm: hide unused functions
  intel_th: pci: Add Kaby Lake PCH-H support
  intel_th: Fix a deadlock in modprobing
  dsp56k: prevent a harmless underflow
  chardev: add missing line break in pr_warn
  lkdtm: use struct arrays instead of enums
  lkdtm: move jprobe entry points to start of source
  lkdtm: reorganize module paramaters
  lkdtm: rename globals for clarity
  lkdtm: rename "count" to "crash_count"
  lkdtm: remove intentional off-by-one array access
  lkdtm: split remaining logic bug tests to separate file
  lkdtm: split heap corruption tests to separate file
  lkdtm: split memory permissions tests to separate file
  lkdtm: split usercopy tests to separate file
  lkdtm: drop "alloc_size" parameter
  lkdtm: add usercopy test for blocking kernel text
  extcon: adc-jack: add suspend/resume support
  extcon: add missing of_node_put after calling of_parse_phandle
  ...
Linus Torvalds 2016-07-24 16:26:26 -07:00
commit 9d0be76f52
33 changed files with 2147 additions and 1608 deletions


@ -46,7 +46,8 @@ Optional properties:
The second cell represents the MICBIAS to be used.
The third cell represents the value of the micd-pol-gpio pin.
- wlf,gpsw : Settings for the general purpose switch
- wlf,gpsw : Settings for the general purpose switch, set as one of the
ARIZONA_GPSW_XXX defines.
Example:


@ -6970,7 +6970,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
LINUX KERNEL DUMP TEST MODULE (LKDTM)
M: Kees Cook <keescook@chromium.org>
S: Maintained
F: drivers/misc/lkdtm.c
F: drivers/misc/lkdtm*
LLC (802.2)
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>


@ -325,7 +325,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
if(get_user(bin, &binary->bin) < 0)
return -EFAULT;
if (len == 0) {
if (len <= 0) {
return -EINVAL; /* nothing to upload?!? */
}
if (len > DSP56K_MAX_BINARY_LENGTH) {


@ -2,7 +2,8 @@
# Makefile for external connector class (extcon) devices
#
obj-$(CONFIG_EXTCON) += extcon.o
obj-$(CONFIG_EXTCON) += extcon-core.o
extcon-core-objs += extcon.o devres.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o

drivers/extcon/devres.c (new file, 216 lines)

@ -0,0 +1,216 @@
/*
* drivers/extcon/devres.c - EXTCON device's resource management
*
* Copyright (C) 2016 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/extcon.h>
static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
{
struct extcon_dev **r = res;
if (WARN_ON(!r || !*r))
return 0;
return *r == data;
}
static void devm_extcon_dev_release(struct device *dev, void *res)
{
extcon_dev_free(*(struct extcon_dev **)res);
}
static void devm_extcon_dev_unreg(struct device *dev, void *res)
{
extcon_dev_unregister(*(struct extcon_dev **)res);
}
struct extcon_dev_notifier_devres {
struct extcon_dev *edev;
unsigned int id;
struct notifier_block *nb;
};
static void devm_extcon_dev_notifier_unreg(struct device *dev, void *res)
{
struct extcon_dev_notifier_devres *this = res;
extcon_unregister_notifier(this->edev, this->id, this->nb);
}
/**
* devm_extcon_dev_allocate - Allocate managed extcon device
* @dev: device owning the extcon device being created
* @supported_cable: Array of supported cable ids, ending with EXTCON_NONE.
* If supported_cable is NULL, cable name related APIs
* are disabled.
*
* This function automatically manages the memory of the extcon device through
* device resource management, simplifying the freeing of that memory.
*
* Returns a pointer to the allocated extcon_dev on success,
* or ERR_PTR(err) on failure.
*/
struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
const unsigned int *supported_cable)
{
struct extcon_dev **ptr, *edev;
ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
edev = extcon_dev_allocate(supported_cable);
if (IS_ERR(edev)) {
devres_free(ptr);
return edev;
}
edev->dev.parent = dev;
*ptr = edev;
devres_add(dev, ptr);
return edev;
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
/**
* devm_extcon_dev_free() - Resource-managed extcon_dev_unregister()
* @dev: device the extcon belongs to
* @edev: the extcon device to unregister
*
* Free the memory that is allocated with devm_extcon_dev_allocate()
* function.
*/
void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
{
WARN_ON(devres_release(dev, devm_extcon_dev_release,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
/**
* devm_extcon_dev_register() - Resource-managed extcon_dev_register()
* @dev: device to allocate extcon device
* @edev: the new extcon device to register
*
* Managed extcon_dev_register() function. If an extcon device is registered
* with this function, it is automatically unregistered on driver detach.
* Internally this function calls extcon_dev_register(); refer to that
* function for more information.
*
* If the extcon device is registered with this function and needs to be
* unregistered separately, devm_extcon_dev_unregister() should be used.
*
* Returns 0 on success or a negative error number on failure.
*/
int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
{
struct extcon_dev **ptr;
int ret;
ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = extcon_dev_register(edev);
if (ret) {
devres_free(ptr);
return ret;
}
*ptr = edev;
devres_add(dev, ptr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
/**
* devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
* @dev: device the extcon belongs to
* @edev: the extcon device to unregister
*
* Unregister extcon device that is registered with devm_extcon_dev_register()
* function.
*/
void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
{
WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
/**
* devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
* @dev: device to allocate extcon device
* @edev: the extcon device that has the external connector.
* @id: the unique id of each external connector in extcon enumeration.
* @nb: a notifier block to be registered.
*
* This function automatically manages the notifier of the extcon device
* through device resource management, simplifying its unregistration.
*
* Note that the second parameter given to the callback of nb (val) is
* "old_state", not the current state. The current state can be retrieved
* by looking at the third parameter (the edev pointer)'s state value.
*
* Returns 0 on success or a negative error number on failure.
*/
int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev,
unsigned int id, struct notifier_block *nb)
{
struct extcon_dev_notifier_devres *ptr;
int ret;
ptr = devres_alloc(devm_extcon_dev_notifier_unreg, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = extcon_register_notifier(edev, id, nb);
if (ret) {
devres_free(ptr);
return ret;
}
ptr->edev = edev;
ptr->id = id;
ptr->nb = nb;
devres_add(dev, ptr);
return 0;
}
EXPORT_SYMBOL(devm_extcon_register_notifier);
/**
* devm_extcon_unregister_notifier() - Resource-managed extcon_unregister_notifier()
* @dev: device to allocate extcon device
* @edev: the extcon device that has the external connector.
* @id: the unique id of each external connector in extcon enumeration.
* @nb: a notifier block to be unregistered.
*/
void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
WARN_ON(devres_release(dev, devm_extcon_dev_notifier_unreg,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL(devm_extcon_unregister_notifier);
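
For context, here is a minimal sketch of how a driver might consume the managed helpers above. The foo_* names, the "foo-extcon-example" device name, and the use of EXTCON_USB as the cable id are illustrative assumptions, not part of this patch:

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>

static const unsigned int foo_cables[] = {
	EXTCON_USB,
	EXTCON_NONE,	/* the supported_cable array is terminated by EXTCON_NONE */
};

/* See the note above: 'event' carries the old state, not the current one. */
static int foo_evt_notifier(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_evt_notifier,
};

static int foo_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;
	int ret;

	/* Memory is bound to pdev->dev; no explicit extcon_dev_free() needed. */
	edev = devm_extcon_dev_allocate(&pdev->dev, foo_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* Unregistered automatically on driver detach. */
	ret = devm_extcon_dev_register(&pdev->dev, edev);
	if (ret)
		return ret;

	/* The notifier registration is likewise dropped by devres on detach. */
	return devm_extcon_register_notifier(&pdev->dev, edev,
					     EXTCON_USB, &foo_nb);
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= { .name = "foo-extcon-example" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");

Because every resource is tied to &pdev->dev, no remove() cleanup is needed for the extcon pieces.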


@ -38,6 +38,7 @@
* @chan: iio channel being queried.
*/
struct adc_jack_data {
struct device *dev;
struct extcon_dev *edev;
const unsigned int **cable_names;
@ -49,6 +50,7 @@ struct adc_jack_data {
struct delayed_work handler;
struct iio_channel *chan;
bool wakeup_source;
};
static void adc_jack_handler(struct work_struct *work)
@ -105,6 +107,7 @@ static int adc_jack_probe(struct platform_device *pdev)
return -EINVAL;
}
data->dev = &pdev->dev;
data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names);
if (IS_ERR(data->edev)) {
dev_err(&pdev->dev, "failed to allocate extcon device\n");
@ -128,6 +131,7 @@ static int adc_jack_probe(struct platform_device *pdev)
return PTR_ERR(data->chan);
data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
data->wakeup_source = pdata->wakeup_source;
INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
@ -151,6 +155,9 @@ static int adc_jack_probe(struct platform_device *pdev)
return err;
}
if (data->wakeup_source)
device_init_wakeup(&pdev->dev, 1);
return 0;
}
@ -165,11 +172,38 @@ static int adc_jack_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int adc_jack_suspend(struct device *dev)
{
struct adc_jack_data *data = dev_get_drvdata(dev);
cancel_delayed_work_sync(&data->handler);
if (device_may_wakeup(data->dev))
enable_irq_wake(data->irq);
return 0;
}
static int adc_jack_resume(struct device *dev)
{
struct adc_jack_data *data = dev_get_drvdata(dev);
if (device_may_wakeup(data->dev))
disable_irq_wake(data->irq);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
adc_jack_suspend, adc_jack_resume);
static struct platform_driver adc_jack_driver = {
.probe = adc_jack_probe,
.remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
.pm = &adc_jack_pm_ops,
},
};


@ -24,8 +24,10 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
@ -91,7 +93,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
struct usb_extcon_info *info;
int ret;
if (!np)
if (!np && !ACPI_HANDLE(dev))
return -EINVAL;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@ -141,7 +143,8 @@ static int usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
device_init_wakeup(dev, 1);
device_init_wakeup(dev, true);
dev_pm_set_wake_irq(dev, info->id_irq);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@ -155,6 +158,9 @@ static int usb_extcon_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&info->wq_detcable);
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
return 0;
}
@ -164,12 +170,6 @@ static int usb_extcon_suspend(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
if (device_may_wakeup(dev)) {
ret = enable_irq_wake(info->id_irq);
if (ret)
return ret;
}
/*
* We don't want to process any IRQs after this point
* as GPIOs used behind I2C subsystem might not be
@ -185,13 +185,10 @@ static int usb_extcon_resume(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
if (device_may_wakeup(dev)) {
ret = disable_irq_wake(info->id_irq);
if (ret)
return ret;
}
enable_irq(info->id_irq);
if (!device_may_wakeup(dev))
queue_delayed_work(system_power_efficient_wq,
&info->wq_detcable, 0);
return ret;
}
@ -206,6 +203,12 @@ static const struct of_device_id usb_extcon_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, usb_extcon_dt_match);
static const struct platform_device_id usb_extcon_platform_ids[] = {
{ .name = "extcon-usb-gpio", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids);
static struct platform_driver usb_extcon_driver = {
.probe = usb_extcon_probe,
.remove = usb_extcon_remove,
@ -214,6 +217,7 @@ static struct platform_driver usb_extcon_driver = {
.pm = &usb_extcon_pm_ops,
.of_match_table = usb_extcon_dt_match,
},
.id_table = usb_extcon_platform_ids,
};
module_platform_driver(usb_extcon_driver);


@ -77,6 +77,26 @@ static const char *extcon_name[] = {
NULL,
};
/**
* struct extcon_cable - An internal data for each cable of extcon device.
* @edev: The extcon device
* @cable_index: Index of this cable in the edev
* @attr_g: Attribute group for the cable
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
* @attrs: Array pointing to attr_name and attr_state for attr_g
*/
struct extcon_cable {
struct extcon_dev *edev;
int cable_index;
struct attribute_group attr_g;
struct device_attribute attr_name;
struct device_attribute attr_state;
struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
};
static struct class *extcon_class;
#if defined(CONFIG_ANDROID)
static struct class_compat *switch_class;
@ -127,38 +147,6 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
return -EINVAL;
}
static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
{
int id = -EINVAL;
int i = 0;
/* Find the id of extcon cable */
while (extcon_name[i]) {
if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
id = i;
break;
}
i++;
}
return id;
}
static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
{
int id;
if (edev->max_supported == 0)
return -EINVAL;
/* Find the the number of extcon cable */
id = find_cable_id_by_name(edev, name);
if (id < 0)
return id;
return find_cable_index_by_id(edev, id);
}
static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
{
if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
@ -373,25 +361,6 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
}
EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
/**
* extcon_get_cable_state() - Get the status of a specific cable.
* @edev: the extcon device that has the cable.
* @cable_name: cable name.
*
* Note that this is slower than extcon_get_cable_state_.
*/
int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
{
int id;
id = find_cable_id_by_name(edev, cable_name);
if (id < 0)
return id;
return extcon_get_cable_state_(edev, id);
}
EXPORT_SYMBOL_GPL(extcon_get_cable_state);
/**
* extcon_set_cable_state_() - Set the status of a specific cable.
* @edev: the extcon device that has the cable.
@ -421,28 +390,6 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
}
EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
/**
* extcon_set_cable_state() - Set the status of a specific cable.
* @edev: the extcon device that has the cable.
* @cable_name: cable name.
* @cable_state: the new cable status. The default semantics is
* true: attached / false: detached.
*
* Note that this is slower than extcon_set_cable_state_.
*/
int extcon_set_cable_state(struct extcon_dev *edev,
const char *cable_name, bool cable_state)
{
int id;
id = find_cable_id_by_name(edev, cable_name);
if (id < 0)
return id;
return extcon_set_cable_state_(edev, id, cable_state);
}
EXPORT_SYMBOL_GPL(extcon_set_cable_state);
/**
* extcon_get_extcon_dev() - Get the extcon device instance from the name
* @extcon_name: The extcon name provided with extcon_dev_register()
@ -466,105 +413,6 @@ struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
}
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
/**
* extcon_register_interest() - Register a notifier for a state change of a
* specific cable, not an entier set of cables of a
* extcon device.
* @obj: an empty extcon_specific_cable_nb object to be returned.
* @extcon_name: the name of extcon device.
* if NULL, extcon_register_interest will register
* every cable with the target cable_name given.
* @cable_name: the target cable name.
* @nb: the notifier block to get notified.
*
* Provide an empty extcon_specific_cable_nb. extcon_register_interest() sets
* the struct for you.
*
* extcon_register_interest is a helper function for those who want to get
* notification for a single specific cable's status change. If a user wants
* to get notification for any changes of all cables of a extcon device,
* he/she should use the general extcon_register_notifier().
*
* Note that the second parameter given to the callback of nb (val) is
* "old_state", not the current state. The current state can be retrieved
* by looking at the third pameter (edev pointer)'s state value.
*/
int extcon_register_interest(struct extcon_specific_cable_nb *obj,
const char *extcon_name, const char *cable_name,
struct notifier_block *nb)
{
unsigned long flags;
int ret;
if (!obj || !cable_name || !nb)
return -EINVAL;
if (extcon_name) {
obj->edev = extcon_get_extcon_dev(extcon_name);
if (!obj->edev)
return -ENODEV;
obj->cable_index = find_cable_index_by_name(obj->edev,
cable_name);
if (obj->cable_index < 0)
return obj->cable_index;
obj->user_nb = nb;
spin_lock_irqsave(&obj->edev->lock, flags);
ret = raw_notifier_chain_register(
&obj->edev->nh[obj->cable_index],
obj->user_nb);
spin_unlock_irqrestore(&obj->edev->lock, flags);
} else {
struct class_dev_iter iter;
struct extcon_dev *extd;
struct device *dev;
if (!extcon_class)
return -ENODEV;
class_dev_iter_init(&iter, extcon_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
extd = dev_get_drvdata(dev);
if (find_cable_index_by_name(extd, cable_name) < 0)
continue;
class_dev_iter_exit(&iter);
return extcon_register_interest(obj, extd->name,
cable_name, nb);
}
ret = -ENODEV;
}
return ret;
}
EXPORT_SYMBOL_GPL(extcon_register_interest);
/**
* extcon_unregister_interest() - Unregister the notifier registered by
* extcon_register_interest().
* @obj: the extcon_specific_cable_nb object returned by
* extcon_register_interest().
*/
int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
{
unsigned long flags;
int ret;
if (!obj)
return -EINVAL;
spin_lock_irqsave(&obj->edev->lock, flags);
ret = raw_notifier_chain_unregister(
&obj->edev->nh[obj->cable_index], obj->user_nb);
spin_unlock_irqrestore(&obj->edev->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(extcon_unregister_interest);
/**
* extcon_register_notifier() - Register a notifiee to get notified by
* any attach status changes from the extcon.
@ -582,14 +430,35 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int ret, idx;
if (!edev || !nb)
if (!nb)
return -EINVAL;
idx = find_cable_index_by_id(edev, id);
if (edev) {
idx = find_cable_index_by_id(edev, id);
if (idx < 0)
return idx;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_register(&edev->nh[idx], nb);
spin_unlock_irqrestore(&edev->lock, flags);
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_register(&edev->nh[idx], nb);
spin_unlock_irqrestore(&edev->lock, flags);
} else {
struct extcon_dev *extd;
mutex_lock(&extcon_dev_list_lock);
list_for_each_entry(extd, &extcon_dev_list, entry) {
idx = find_cable_index_by_id(extd, id);
if (idx >= 0)
break;
}
mutex_unlock(&extcon_dev_list_lock);
if (idx >= 0) {
edev = extd;
return extcon_register_notifier(extd, id, nb);
} else {
ret = -ENODEV;
}
}
return ret;
}
@ -611,6 +480,8 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
return -EINVAL;
idx = find_cable_index_by_id(edev, id);
if (idx < 0)
return idx;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_unregister(&edev->nh[idx], nb);
@ -693,66 +564,6 @@ void extcon_dev_free(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_free);
static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
{
struct extcon_dev **r = res;
if (WARN_ON(!r || !*r))
return 0;
return *r == data;
}
static void devm_extcon_dev_release(struct device *dev, void *res)
{
extcon_dev_free(*(struct extcon_dev **)res);
}
/**
* devm_extcon_dev_allocate - Allocate managed extcon device
* @dev: device owning the extcon device being created
* @supported_cable: Array of supported extcon ending with EXTCON_NONE.
* If supported_cable is NULL, cable name related APIs
* are disabled.
*
* This function manages automatically the memory of extcon device using device
* resource management and simplify the control of freeing the memory of extcon
* device.
*
* Returns the pointer memory of allocated extcon_dev if success
* or ERR_PTR(err) if fail
*/
struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
const unsigned int *supported_cable)
{
struct extcon_dev **ptr, *edev;
ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
edev = extcon_dev_allocate(supported_cable);
if (IS_ERR(edev)) {
devres_free(ptr);
return edev;
}
edev->dev.parent = dev;
*ptr = edev;
devres_add(dev, ptr);
return edev;
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
{
WARN_ON(devres_release(dev, devm_extcon_dev_release,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
/**
* extcon_dev_register() - Register a new extcon device
* @edev : the new extcon device (should be allocated before calling)
@ -1018,63 +829,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
static void devm_extcon_dev_unreg(struct device *dev, void *res)
{
extcon_dev_unregister(*(struct extcon_dev **)res);
}
/**
* devm_extcon_dev_register() - Resource-managed extcon_dev_register()
* @dev: device to allocate extcon device
* @edev: the new extcon device to register
*
* Managed extcon_dev_register() function. If extcon device is attached with
* this function, that extcon device is automatically unregistered on driver
* detach. Internally this function calls extcon_dev_register() function.
* To get more information, refer that function.
*
* If extcon device is registered with this function and the device needs to be
* unregistered separately, devm_extcon_dev_unregister() should be used.
*
* Returns 0 if success or negaive error number if failure.
*/
int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
{
struct extcon_dev **ptr;
int ret;
ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = extcon_dev_register(edev);
if (ret) {
devres_free(ptr);
return ret;
}
*ptr = edev;
devres_add(dev, ptr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
/**
* devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
* @dev: device the extcon belongs to
* @edev: the extcon device to unregister
*
* Unregister extcon device that is registered with devm_extcon_dev_register()
* function.
*/
void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
{
WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
devm_extcon_dev_match, edev));
}
EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
#ifdef CONFIG_OF
/*
* extcon_get_edev_by_phandle - Get the extcon device from devicetree
@ -1107,10 +861,12 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
list_for_each_entry(edev, &extcon_dev_list, entry) {
if (edev->dev.parent && edev->dev.parent->of_node == node) {
mutex_unlock(&extcon_dev_list_lock);
of_node_put(node);
return edev;
}
}
mutex_unlock(&extcon_dev_list_lock);
of_node_put(node);
return ERR_PTR(-EPROBE_DEFER);
}


@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include "intel_th.h"
@ -67,23 +68,33 @@ static int intel_th_probe(struct device *dev)
hubdrv = to_intel_th_driver(hub->dev.driver);
pm_runtime_set_active(dev);
pm_runtime_no_callbacks(dev);
pm_runtime_enable(dev);
ret = thdrv->probe(to_intel_th_device(dev));
if (ret)
return ret;
goto out_pm;
if (thdrv->attr_group) {
ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group);
if (ret) {
thdrv->remove(thdev);
return ret;
}
if (ret)
goto out;
}
if (thdev->type == INTEL_TH_OUTPUT &&
!intel_th_output_assigned(thdev))
/* does not talk to hardware */
ret = hubdrv->assign(hub, thdev);
out:
if (ret)
thdrv->remove(thdev);
out_pm:
if (ret)
pm_runtime_disable(dev);
return ret;
}
@ -103,6 +114,8 @@ static int intel_th_remove(struct device *dev)
if (thdrv->attr_group)
sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group);
pm_runtime_get_sync(dev);
thdrv->remove(thdev);
if (intel_th_output_assigned(thdev)) {
@ -110,9 +123,14 @@ static int intel_th_remove(struct device *dev)
to_intel_th_driver(dev->parent->driver);
if (hub->dev.driver)
/* does not talk to hardware */
hubdrv->unassign(hub, thdev);
}
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
@ -185,6 +203,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
to_intel_th_driver_or_null(thdev->dev.driver);
int ret = 0;
if (!thdrv)
return -ENODEV;
@ -192,12 +211,17 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
if (!try_module_get(thdrv->driver.owner))
return -ENODEV;
pm_runtime_get_sync(&thdev->dev);
if (thdrv->activate)
return thdrv->activate(thdev);
ret = thdrv->activate(thdev);
else
intel_th_trace_enable(thdev);
intel_th_trace_enable(thdev);
if (ret)
pm_runtime_put(&thdev->dev);
return 0;
return ret;
}
static void intel_th_output_deactivate(struct intel_th_device *thdev)
@ -213,6 +237,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
else
intel_th_trace_disable(thdev);
pm_runtime_put(&thdev->dev);
module_put(thdrv->driver.owner);
}
@ -465,6 +490,38 @@ static struct intel_th_subdevice {
},
};
#ifdef CONFIG_MODULES
static void __intel_th_request_hub_module(struct work_struct *work)
{
struct intel_th *th = container_of(work, struct intel_th,
request_module_work);
request_module("intel_th_%s", th->hub->name);
}
static int intel_th_request_hub_module(struct intel_th *th)
{
INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
schedule_work(&th->request_module_work);
return 0;
}
static void intel_th_request_hub_module_flush(struct intel_th *th)
{
flush_work(&th->request_module_work);
}
#else
static inline int intel_th_request_hub_module(struct intel_th *th)
{
return -EINVAL;
}
static inline void intel_th_request_hub_module_flush(struct intel_th *th)
{
}
#endif /* CONFIG_MODULES */
static int intel_th_populate(struct intel_th *th, struct resource *devres,
unsigned int ndevres, int irq)
{
@ -535,7 +592,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
th->hub = thdev;
err = request_module("intel_th_%s", subdev->name);
err = intel_th_request_hub_module(th);
if (!err)
req++;
}
@ -628,6 +685,10 @@ intel_th_alloc(struct device *dev, struct resource *devres,
dev_set_drvdata(dev, th);
pm_runtime_no_callbacks(dev);
pm_runtime_put(dev);
pm_runtime_allow(dev);
err = intel_th_populate(th, devres, ndevres, irq);
if (err)
goto err_chrdev;
@ -635,6 +696,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
return th;
err_chrdev:
pm_runtime_forbid(dev);
__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
"intel_th/output");
@ -652,12 +715,16 @@ void intel_th_free(struct intel_th *th)
{
int i;
intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
if (th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
pm_runtime_get_sync(th->dev);
pm_runtime_forbid(th->dev);
__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
"intel_th/output");
@ -682,6 +749,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
return -EINVAL;
pm_runtime_get_sync(&thdev->dev);
hubdrv->enable(hub, &thdev->output);
return 0;
@ -702,6 +770,7 @@ int intel_th_trace_disable(struct intel_th_device *thdev)
return -EINVAL;
hubdrv->disable(hub, &thdev->output);
pm_runtime_put(&thdev->dev);
return 0;
}


@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/pm_runtime.h>
#include "intel_th.h"
#include "gth.h"
@ -190,6 +191,11 @@ static ssize_t master_attr_store(struct device *dev,
if (old_port >= 0) {
gth->master[ma->master] = -1;
clear_bit(ma->master, gth->output[old_port].master);
/*
* if the port is active, program this setting,
* implies that runtime PM is on
*/
if (gth->output[old_port].output->active)
gth_master_set(gth, ma->master, -1);
}
@ -204,7 +210,7 @@ static ssize_t master_attr_store(struct device *dev,
set_bit(ma->master, gth->output[port].master);
/* if the port is active, program this setting */
/* if the port is active, program this setting, see above */
if (gth->output[port].output->active)
gth_master_set(gth, ma->master, port);
}
@ -326,11 +332,15 @@ static ssize_t output_attr_show(struct device *dev,
struct gth_device *gth = oa->gth;
size_t count;
pm_runtime_get_sync(dev);
spin_lock(&gth->gth_lock);
count = snprintf(buf, PAGE_SIZE, "%x\n",
gth_output_parm_get(gth, oa->port, oa->parm));
spin_unlock(&gth->gth_lock);
pm_runtime_put(dev);
return count;
}
@ -346,10 +356,14 @@ static ssize_t output_attr_store(struct device *dev,
if (kstrtouint(buf, 16, &config) < 0)
return -EINVAL;
pm_runtime_get_sync(dev);
spin_lock(&gth->gth_lock);
gth_output_parm_set(gth, oa->port, oa->parm, config);
spin_unlock(&gth->gth_lock);
pm_runtime_put(dev);
return count;
}
@ -451,7 +465,7 @@ static int intel_th_output_attributes(struct gth_device *gth)
}
/**
* intel_th_gth_disable() - enable tracing to an output device
* intel_th_gth_disable() - disable tracing to an output device
* @thdev: GTH device
* @output: output device's descriptor
*


@ -114,6 +114,9 @@ intel_th_output_assigned(struct intel_th_device *thdev)
* @unassign: deassociate an output type device from an output port
* @enable: enable tracing for a given output device
* @disable: disable tracing for a given output device
* @irq: interrupt callback
* @activate: enable tracing on the output's side
* @deactivate: disable tracing on the output's side
* @fops: file operations for device nodes
* @attr_group: attributes provided by the driver
*
@ -205,6 +208,9 @@ struct intel_th {
int id;
int major;
#ifdef CONFIG_MODULES
struct work_struct request_module_work;
#endif /* CONFIG_MODULES */
#ifdef CONFIG_INTEL_TH_DEBUG
struct dentry *dbg;
#endif


@ -80,6 +80,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e),
.driver_data = (kernel_ulong_t)0,
},
{
/* Kaby Lake PCH-H */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
.driver_data = (kernel_ulong_t)0,
},
{ 0 },
};


@ -15,6 +15,7 @@
* as defined in MIPI STPv2 specification.
*/
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
@ -482,14 +483,40 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
return -EFAULT;
}
pm_runtime_get_sync(&stm->dev);
count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
kbuf, count);
pm_runtime_mark_last_busy(&stm->dev);
pm_runtime_put_autosuspend(&stm->dev);
kfree(kbuf);
return count;
}
static void stm_mmap_open(struct vm_area_struct *vma)
{
struct stm_file *stmf = vma->vm_file->private_data;
struct stm_device *stm = stmf->stm;
pm_runtime_get(&stm->dev);
}
static void stm_mmap_close(struct vm_area_struct *vma)
{
struct stm_file *stmf = vma->vm_file->private_data;
struct stm_device *stm = stmf->stm;
pm_runtime_mark_last_busy(&stm->dev);
pm_runtime_put_autosuspend(&stm->dev);
}
static const struct vm_operations_struct stm_mmap_vmops = {
.open = stm_mmap_open,
.close = stm_mmap_close,
};
static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
{
struct stm_file *stmf = file->private_data;
@ -514,8 +541,11 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
if (!phys)
return -EINVAL;
pm_runtime_get_sync(&stm->dev);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);
return 0;
@ -701,6 +731,17 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
if (err)
goto err_device;
/*
* Use delayed autosuspend to avoid bouncing back and forth
* on recurring character device writes, with the initial
* delay time of 2 seconds.
*/
pm_runtime_no_callbacks(&stm->dev);
pm_runtime_use_autosuspend(&stm->dev);
pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
pm_runtime_set_suspended(&stm->dev);
pm_runtime_enable(&stm->dev);
return 0;
err_device:
@ -724,6 +765,9 @@ void stm_unregister_device(struct stm_data *stm_data)
struct stm_source_device *src, *iter;
int i, ret;
pm_runtime_dont_use_autosuspend(&stm->dev);
pm_runtime_disable(&stm->dev);
mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
ret = __stm_source_link_drop(src, stm);
@ -878,6 +922,8 @@ static int __stm_source_link_drop(struct stm_source_device *src,
stm_output_free(link, &src->output);
list_del_init(&src->link_entry);
pm_runtime_mark_last_busy(&link->dev);
pm_runtime_put_autosuspend(&link->dev);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
@ -971,8 +1017,11 @@ static ssize_t stm_source_link_store(struct device *dev,
if (!link)
return -EINVAL;
pm_runtime_get(&link->dev);
err = stm_source_link_add(src, link);
if (err) {
pm_runtime_put_autosuspend(&link->dev);
/* matches the stm_find_device() above */
stm_put_device(link);
}
@ -1033,6 +1082,9 @@ int stm_source_register_device(struct device *parent,
if (err)
goto err;
pm_runtime_no_callbacks(&src->dev);
pm_runtime_forbid(&src->dev);
err = device_add(&src->dev);
if (err)
goto err;


@ -57,3 +57,17 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PANEL) += panel.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
OBJCOPYFLAGS :=
OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
--set-section-flags .text=alloc,readonly \
--rename-section .text=.rodata
$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
$(call if_changed,objcopy)
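
The objcopy rule above is what makes the EXEC_RODATA test meaningful: lkdtm_rodata.o is built normally, then its .text section is marked alloc,readonly and renamed to .rodata, so lkdtm_rodata_do_nothing() (declared in lkdtm.h below) ends up in a read-only, non-executable mapping. The companion lkdtm_rodata.c is not shown in this diff; presumably it is little more than the following sketch (an assumption, not the file's verbatim contents):

/* Sketch of a function whose only purpose is to be relocated into .rodata
 * by the objcopy rule above; the EXEC_RODATA test then tries to execute it. */
#include "lkdtm.h"

void notrace lkdtm_rodata_do_nothing(void)
{
	/* Does nothing; we only need a function body to (fail to) execute. */
}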

File diff suppressed because it is too large.

drivers/misc/lkdtm.h (new file, 60 lines)

@ -0,0 +1,60 @@
#ifndef __LKDTM_H
#define __LKDTM_H
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
/* lkdtm_bugs.c */
void __init lkdtm_bugs_init(int *recur_param);
void lkdtm_PANIC(void);
void lkdtm_BUG(void);
void lkdtm_WARNING(void);
void lkdtm_EXCEPTION(void);
void lkdtm_LOOP(void);
void lkdtm_OVERFLOW(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
void lkdtm_ATOMIC_UNDERFLOW(void);
void lkdtm_ATOMIC_OVERFLOW(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
void lkdtm_WRITE_AFTER_FREE(void);
void lkdtm_READ_AFTER_FREE(void);
void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
void lkdtm_READ_BUDDY_AFTER_FREE(void);
/* lkdtm_perms.c */
void __init lkdtm_perms_init(void);
void lkdtm_WRITE_RO(void);
void lkdtm_WRITE_RO_AFTER_INIT(void);
void lkdtm_WRITE_KERN(void);
void lkdtm_EXEC_DATA(void);
void lkdtm_EXEC_STACK(void);
void lkdtm_EXEC_KMALLOC(void);
void lkdtm_EXEC_VMALLOC(void);
void lkdtm_EXEC_RODATA(void);
void lkdtm_EXEC_USERSPACE(void);
void lkdtm_ACCESS_USERSPACE(void);
/* lkdtm_rodata.c */
void lkdtm_rodata_do_nothing(void);
/* lkdtm_usercopy.c */
void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
void lkdtm_USERCOPY_STACK_FRAME_TO(void);
void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
void lkdtm_USERCOPY_STACK_BEYOND(void);
void lkdtm_USERCOPY_KERNEL(void);
#endif

drivers/misc/lkdtm_bugs.c (new file, 148 lines)

@ -0,0 +1,148 @@
/*
* This is for all the tests related to logic bugs (e.g. bad dereferences,
* bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
* lockups) along with other things that don't fit well into existing LKDTM
* test source files.
*/
#include "lkdtm.h"
#include <linux/sched.h>
/*
* Make sure our attempts to overrun the kernel stack don't trigger
* a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
* recurse past the end of THREAD_SIZE by default.
*/
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
static int recur_count = REC_NUM_DEFAULT;
static DEFINE_SPINLOCK(lock_me_up);
static int recursive_loop(int remaining)
{
char buf[REC_STACK_SIZE];
/* Make sure compiler does not optimize this away. */
memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
if (!remaining)
return 0;
else
return recursive_loop(remaining - 1);
}
/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
if (*recur_param < 0)
*recur_param = recur_count;
else
recur_count = *recur_param;
}
void lkdtm_PANIC(void)
{
panic("dumptest");
}
void lkdtm_BUG(void)
{
BUG();
}
void lkdtm_WARNING(void)
{
WARN_ON(1);
}
void lkdtm_EXCEPTION(void)
{
*((int *) 0) = 0;
}
void lkdtm_LOOP(void)
{
for (;;)
;
}
void lkdtm_OVERFLOW(void)
{
(void) recursive_loop(recur_count);
}
noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8];
memset((void *)data, 0, 64);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
u32 *p;
u32 val = 0x12345678;
p = (u32 *)(data + 1);
if (*p == 0)
val = 0x87654321;
*p = val;
}
void lkdtm_SOFTLOCKUP(void)
{
preempt_disable();
for (;;)
cpu_relax();
}
void lkdtm_HARDLOCKUP(void)
{
local_irq_disable();
for (;;)
cpu_relax();
}
void lkdtm_SPINLOCKUP(void)
{
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
/* Let sparse know we intended to exit holding the lock. */
__release(&lock_me_up);
}
void lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
void lkdtm_ATOMIC_UNDERFLOW(void)
{
atomic_t under = ATOMIC_INIT(INT_MIN);
pr_info("attempting good atomic increment\n");
atomic_inc(&under);
atomic_dec(&under);
pr_info("attempting bad atomic underflow\n");
atomic_dec(&under);
}
void lkdtm_ATOMIC_OVERFLOW(void)
{
atomic_t over = ATOMIC_INIT(INT_MAX);
pr_info("attempting good atomic decrement\n");
atomic_dec(&over);
atomic_inc(&over);
pr_info("attempting bad atomic overflow\n");
atomic_inc(&over);
}

drivers/misc/lkdtm_core.c (new file, 544 lines)

@ -0,0 +1,544 @@
/*
* Linux Kernel Dump Test Module for testing kernel crash conditions:
* induces system failures at predefined crashpoints and under predefined
* operational conditions in order to evaluate the reliability of kernel
* sanity checking and crash dumps obtained using different dumping
* solutions.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2006
*
* Author: Ankita Garg <ankita@in.ibm.com>
*
* It is adapted from the Linux Kernel Dump Test Tool by
* Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
*
* Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
*
* See Documentation/fault-injection/provoke-crashes.txt for instructions
*/
#include "lkdtm.h"
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif
#define DEFAULT_COUNT 10
static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off);
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off);
#ifdef CONFIG_KPROBES
static void lkdtm_handler(void);
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off);
/* jprobe entry point handlers. */
static unsigned int jp_do_irq(unsigned int irq)
{
lkdtm_handler();
jprobe_return();
return 0;
}
static irqreturn_t jp_handle_irq_event(unsigned int irq,
struct irqaction *action)
{
lkdtm_handler();
jprobe_return();
return 0;
}
static void jp_tasklet_action(struct softirq_action *a)
{
lkdtm_handler();
jprobe_return();
}
static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
lkdtm_handler();
jprobe_return();
}
struct scan_control;
static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
struct zone *zone,
struct scan_control *sc)
{
lkdtm_handler();
jprobe_return();
return 0;
}
static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
{
lkdtm_handler();
jprobe_return();
return 0;
}
static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
lkdtm_handler();
jprobe_return();
return 0;
}
# ifdef CONFIG_IDE
static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
struct block_device *bdev, unsigned int cmd,
unsigned long arg)
{
lkdtm_handler();
jprobe_return();
return 0;
}
# endif
#endif
/* Crash points */
struct crashpoint {
const char *name;
const struct file_operations fops;
struct jprobe jprobe;
};
#define CRASHPOINT(_name, _write, _symbol, _entry) \
{ \
.name = _name, \
.fops = { \
.read = lkdtm_debugfs_read, \
.llseek = generic_file_llseek, \
.open = lkdtm_debugfs_open, \
.write = _write, \
}, \
.jprobe = { \
.kp.symbol_name = _symbol, \
.entry = (kprobe_opcode_t *)_entry, \
}, \
}
/* Define the possible places where we can trigger a crash point. */
struct crashpoint crashpoints[] = {
CRASHPOINT("DIRECT", direct_entry,
NULL, NULL),
#ifdef CONFIG_KPROBES
CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry,
"do_IRQ", jp_do_irq),
CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry,
"handle_IRQ_event", jp_handle_irq_event),
CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry,
"tasklet_action", jp_tasklet_action),
CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry,
"ll_rw_block", jp_ll_rw_block),
CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry,
"shrink_inactive_list", jp_shrink_inactive_list),
CRASHPOINT("TIMERADD", lkdtm_debugfs_entry,
"hrtimer_start", jp_hrtimer_start),
CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry,
"scsi_dispatch_cmd", jp_scsi_dispatch_cmd),
# ifdef CONFIG_IDE
CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry,
"generic_ide_ioctl", jp_generic_ide_ioctl),
# endif
#endif
};
/* Crash types. */
struct crashtype {
const char *name;
void (*func)(void);
};
#define CRASHTYPE(_name) \
{ \
.name = __stringify(_name), \
.func = lkdtm_ ## _name, \
}
/* Define the possible types of crashes that can be triggered. */
struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(OVERFLOW),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
CRASHTYPE(WRITE_AFTER_FREE),
CRASHTYPE(READ_AFTER_FREE),
CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
CRASHTYPE(READ_BUDDY_AFTER_FREE),
CRASHTYPE(SOFTLOCKUP),
CRASHTYPE(HARDLOCKUP),
CRASHTYPE(SPINLOCKUP),
CRASHTYPE(HUNG_TASK),
CRASHTYPE(EXEC_DATA),
CRASHTYPE(EXEC_STACK),
CRASHTYPE(EXEC_KMALLOC),
CRASHTYPE(EXEC_VMALLOC),
CRASHTYPE(EXEC_RODATA),
CRASHTYPE(EXEC_USERSPACE),
CRASHTYPE(ACCESS_USERSPACE),
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
CRASHTYPE(ATOMIC_UNDERFLOW),
CRASHTYPE(ATOMIC_OVERFLOW),
CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
CRASHTYPE(USERCOPY_STACK_FRAME_TO),
CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
CRASHTYPE(USERCOPY_STACK_BEYOND),
CRASHTYPE(USERCOPY_KERNEL),
};
/* Global jprobe entry and crashtype. */
static struct jprobe *lkdtm_jprobe;
struct crashpoint *lkdtm_crashpoint;
struct crashtype *lkdtm_crashtype;
/* Module parameters */
static int recur_count = -1;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
static char* cpoint_name;
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
static char* cpoint_type;
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
"hitting the crash point");
static int cpoint_count = DEFAULT_COUNT;
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
/* Return the matching crashtype or NULL if the name is invalid */
static struct crashtype *find_crashtype(const char *name)
{
int i;
for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
if (!strcmp(name, crashtypes[i].name))
return &crashtypes[i];
}
return NULL;
}
/*
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
*/
static noinline void lkdtm_do_action(struct crashtype *crashtype)
{
BUG_ON(!crashtype || !crashtype->func);
crashtype->func();
}
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
struct crashtype *crashtype)
{
int ret;
/* If this doesn't have a symbol, just call immediately. */
if (!crashpoint->jprobe.kp.symbol_name) {
lkdtm_do_action(crashtype);
return 0;
}
if (lkdtm_jprobe != NULL)
unregister_jprobe(lkdtm_jprobe);
lkdtm_crashpoint = crashpoint;
lkdtm_crashtype = crashtype;
lkdtm_jprobe = &crashpoint->jprobe;
ret = register_jprobe(lkdtm_jprobe);
if (ret < 0) {
pr_info("Couldn't register jprobe %s\n",
crashpoint->jprobe.kp.symbol_name);
lkdtm_jprobe = NULL;
lkdtm_crashpoint = NULL;
lkdtm_crashtype = NULL;
}
return ret;
}
#ifdef CONFIG_KPROBES
/* Global crash counter and spinlock. */
static int crash_count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(crash_count_lock);
/* Called by jprobe entry points. */
static void lkdtm_handler(void)
{
unsigned long flags;
bool do_it = false;
BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
spin_lock_irqsave(&crash_count_lock, flags);
crash_count--;
pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
if (crash_count == 0) {
do_it = true;
crash_count = cpoint_count;
}
spin_unlock_irqrestore(&crash_count_lock, flags);
if (do_it)
lkdtm_do_action(lkdtm_crashtype);
}
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off)
{
struct crashpoint *crashpoint = file_inode(f)->i_private;
struct crashtype *crashtype = NULL;
char *buf;
int err;
if (count >= PAGE_SIZE)
return -EINVAL;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, user_buf, count)) {
free_page((unsigned long) buf);
return -EFAULT;
}
/* NULL-terminate and remove enter */
buf[count] = '\0';
strim(buf);
crashtype = find_crashtype(buf);
free_page((unsigned long)buf);
if (!crashtype)
return -EINVAL;
err = lkdtm_register_cpoint(crashpoint, crashtype);
if (err < 0)
return err;
*off += count;
return count;
}
#endif
/* Generic read callback that just prints out the available crash types */
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off)
{
char *buf;
int i, n, out;
buf = (char *)__get_free_page(GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
crashtypes[i].name);
}
buf[n] = '\0';
out = simple_read_from_buffer(user_buf, count, off,
buf, n);
free_page((unsigned long) buf);
return out;
}
static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
return 0;
}
/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off)
{
struct crashtype *crashtype;
char *buf;
if (count >= PAGE_SIZE)
return -EINVAL;
if (count < 1)
return -EINVAL;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, user_buf, count)) {
free_page((unsigned long) buf);
return -EFAULT;
}
/* NULL-terminate and remove enter */
buf[count] = '\0';
strim(buf);
crashtype = find_crashtype(buf);
free_page((unsigned long) buf);
if (!crashtype)
return -EINVAL;
pr_info("Performing direct entry %s\n", crashtype->name);
lkdtm_do_action(crashtype);
*off += count;
return count;
}
static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
{
struct crashpoint *crashpoint = NULL;
struct crashtype *crashtype = NULL;
int ret = -EINVAL;
int i;
/* Neither or both of these need to be set */
if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
pr_err("Need both cpoint_type and cpoint_name or neither\n");
return -EINVAL;
}
if (cpoint_type) {
crashtype = find_crashtype(cpoint_type);
if (!crashtype) {
pr_err("Unknown crashtype '%s'\n", cpoint_type);
return -EINVAL;
}
}
if (cpoint_name) {
for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
if (!strcmp(cpoint_name, crashpoints[i].name))
crashpoint = &crashpoints[i];
}
/* Refuse unknown crashpoints. */
if (!crashpoint) {
pr_err("Invalid crashpoint %s\n", cpoint_name);
return -EINVAL;
}
}
#ifdef CONFIG_KPROBES
/* Set crash count. */
crash_count = cpoint_count;
#endif
/* Handle test-specific initialization. */
lkdtm_bugs_init(&recur_count);
lkdtm_perms_init();
lkdtm_usercopy_init();
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
if (!lkdtm_debugfs_root) {
pr_err("creating root dir failed\n");
return -ENODEV;
}
/* Install debugfs trigger files. */
for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
struct crashpoint *cur = &crashpoints[i];
struct dentry *de;
de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
cur, &cur->fops);
if (de == NULL) {
pr_err("could not create crashpoint %s\n", cur->name);
goto out_err;
}
}
/* Install crashpoint if one was selected. */
if (crashpoint) {
ret = lkdtm_register_cpoint(crashpoint, crashtype);
if (ret < 0) {
pr_info("Invalid crashpoint %s\n", crashpoint->name);
goto out_err;
}
pr_info("Crash point %s of type %s registered\n",
crashpoint->name, cpoint_type);
} else {
pr_info("No crash points registered, enable through debugfs\n");
}
return 0;
out_err:
debugfs_remove_recursive(lkdtm_debugfs_root);
return ret;
}
static void __exit lkdtm_module_exit(void)
{
debugfs_remove_recursive(lkdtm_debugfs_root);
/* Handle test-specific clean-up. */
lkdtm_usercopy_exit();
unregister_jprobe(lkdtm_jprobe);
pr_info("Crash point unregistered\n");
}
module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kernel crash testing module");

drivers/misc/lkdtm_heap.c (new file, 142 lines)

@ -0,0 +1,142 @@
/*
* This is for all the tests relating directly to heap memory, including
* page allocation and slab allocations.
*/
#include "lkdtm.h"
#include <linux/slab.h>
/*
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
* correctly.
*/
void lkdtm_OVERWRITE_ALLOCATION(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
}
void lkdtm_WRITE_AFTER_FREE(void)
{
int *base, *again;
size_t len = 1024;
/*
* The slub allocator uses the first word to store the free
* pointer in some configurations. Use the middle of the
* allocation to avoid running into the freelist
*/
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
pr_info("Attempting bad write to freed memory at %p\n",
&base[offset]);
kfree(base);
base[offset] = 0x0abcdef0;
/* Attempt to notice the overwrite. */
again = kmalloc(len, GFP_KERNEL);
kfree(again);
if (again != base)
pr_info("Hmm, didn't get the same memory range.\n");
}
void lkdtm_READ_AFTER_FREE(void)
{
int *base, *val, saw;
size_t len = 1024;
/*
* The slub allocator uses the first word to store the free
* pointer in some configurations. Use the middle of the
* allocation to avoid running into the freelist
*/
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
if (!base) {
pr_info("Unable to allocate base memory.\n");
return;
}
val = kmalloc(len, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate val memory.\n");
kfree(base);
return;
}
*val = 0x12345678;
base[offset] = *val;
pr_info("Value in memory before free: %x\n", base[offset]);
kfree(base);
pr_info("Attempting bad read from freed memory\n");
saw = base[offset];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
BUG();
}
pr_info("Memory was not poisoned\n");
kfree(val);
}
void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
if (!p) {
pr_info("Unable to allocate free page\n");
return;
}
pr_info("Writing to the buddy page before free\n");
memset((void *)p, 0x3, PAGE_SIZE);
free_page(p);
schedule();
pr_info("Attempting bad write to the buddy page after free\n");
memset((void *)p, 0x78, PAGE_SIZE);
/* Attempt to notice the overwrite. */
p = __get_free_page(GFP_KERNEL);
free_page(p);
schedule();
}
void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
int saw, *val;
int *base;
if (!p) {
pr_info("Unable to allocate free page\n");
return;
}
val = kmalloc(1024, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate val memory.\n");
free_page(p);
return;
}
base = (int *)p;
*val = 0x12345678;
base[0] = *val;
pr_info("Value in memory before free: %x\n", base[0]);
free_page(p);
pr_info("Attempting to read from freed memory\n");
saw = base[0];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
BUG();
}
pr_info("Buddy page was not poisoned\n");
kfree(val);
}

drivers/misc/lkdtm_perms.c (new file, 199 lines)

@ -0,0 +1,199 @@
/*
* This is for all the tests related to validating kernel memory
* permissions: non-executable regions, non-writable regions, and
* even non-readable regions.
*/
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
/* Whether or not to fill the target memory area with do_nothing(). */
#define CODE_WRITE true
#define CODE_AS_IS false
/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
#define EXEC_SIZE 64
/* This is non-const, so it will end up in the .data section. */
static u8 data_area[EXEC_SIZE];
/* This is const, so it will end up in the .rodata section. */
static const unsigned long rodata = 0xAA55AA55;
/* This is marked __ro_after_init, so it should ultimately be .rodata. */
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
/*
* This just returns to the caller. It is designed to be copied into
* non-executable memory regions.
*/
static void do_nothing(void)
{
return;
}
/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
pr_info("do_overwritten wasn't overwritten!\n");
return;
}
static noinline void execute_location(void *dst, bool write)
{
void (*func)(void) = dst;
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
if (write == CODE_WRITE) {
memcpy(dst, do_nothing, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
pr_info("attempting bad execution at %p\n", func);
func();
}
static void execute_user_location(void *dst)
{
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
return;
flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
pr_info("attempting bad execution at %p\n", func);
func();
}
void lkdtm_WRITE_RO(void)
{
/* Explicitly cast away "const" for the test. */
unsigned long *ptr = (unsigned long *)&rodata;
pr_info("attempting bad rodata write at %p\n", ptr);
*ptr ^= 0xabcd1234;
}
void lkdtm_WRITE_RO_AFTER_INIT(void)
{
unsigned long *ptr = &ro_after_init;
/*
* Verify the value was written during init. Since an Oops is
* considered a "success" here, a failure just means we skip the
* real test.
*/
if ((*ptr & 0xAA) != 0xAA) {
pr_info("%p was NOT written during init!?\n", ptr);
return;
}
pr_info("attempting bad ro_after_init write at %p\n", ptr);
*ptr ^= 0xabcd1234;
}
void lkdtm_WRITE_KERN(void)
{
size_t size;
unsigned char *ptr;
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
pr_info("attempting bad %zu byte write at %p\n", size, ptr);
memcpy(ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
do_overwritten();
}
void lkdtm_EXEC_DATA(void)
{
execute_location(data_area, CODE_WRITE);
}
void lkdtm_EXEC_STACK(void)
{
u8 stack_area[EXEC_SIZE];
execute_location(stack_area, CODE_WRITE);
}
void lkdtm_EXEC_KMALLOC(void)
{
u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
execute_location(kmalloc_area, CODE_WRITE);
kfree(kmalloc_area);
}
void lkdtm_EXEC_VMALLOC(void)
{
u32 *vmalloc_area = vmalloc(EXEC_SIZE);
execute_location(vmalloc_area, CODE_WRITE);
vfree(vmalloc_area);
}
void lkdtm_EXEC_RODATA(void)
{
execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
}
void lkdtm_EXEC_USERSPACE(void)
{
unsigned long user_addr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
execute_user_location((void *)user_addr);
vm_munmap(user_addr, PAGE_SIZE);
}
void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
unsigned long *ptr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
pr_warn("copy_to_user failed\n");
vm_munmap(user_addr, PAGE_SIZE);
return;
}
ptr = (unsigned long *)user_addr;
pr_info("attempting bad read at %p\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
pr_info("attempting bad write at %p\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
}
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}


@ -0,0 +1,10 @@
/*
* This includes functions that are meant to live entirely in .rodata
* (via objcopy tricks), to validate the non-executability of .rodata.
*/
#include "lkdtm.h"
void lkdtm_rodata_do_nothing(void)
{
/* Does nothing. We just want an architecture agnostic "return". */
}


@ -0,0 +1,313 @@
/*
* This is for all the tests related to copy_to_user() and copy_from_user()
* hardening.
*/
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
static size_t cache_size = 1024;
static struct kmem_cache *bad_cache;
static const unsigned char test_text[] = "This is a test.\n";
/*
* Instead of adding -Wno-return-local-addr, just pass the stack address
* through a function to obfuscate it from the compiler.
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
return stack + 0;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
unsigned char buf[32];
int i;
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(buf); i++) {
buf[i] = value & 0xff;
}
return trick_compiler(buf);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
unsigned long user_addr;
unsigned char good_stack[32];
unsigned char *bad_stack;
int i;
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(good_stack); i++)
good_stack[i] = test_text[i % sizeof(test_text)];
/* This is a pointer to outside our current stack frame. */
if (bad_frame) {
bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
} else {
/* Put start address just inside stack. */
bad_stack = task_stack_page(current) + THREAD_SIZE;
bad_stack -= sizeof(unsigned long);
}
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (to_user) {
pr_info("attempting good copy_to_user of local stack\n");
if (copy_to_user((void __user *)user_addr, good_stack,
sizeof(good_stack))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of distant stack\n");
if (copy_to_user((void __user *)user_addr, bad_stack,
sizeof(good_stack))) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
/*
* There isn't a safe way to not be protected by usercopy
* if we're going to write to another thread's stack.
*/
if (!bad_frame)
goto free_user;
pr_info("attempting good copy_from_user of local stack\n");
if (copy_from_user(good_stack, (void __user *)user_addr,
sizeof(good_stack))) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of distant stack\n");
if (copy_from_user(bad_stack, (void __user *)user_addr,
sizeof(good_stack))) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
static void do_usercopy_heap_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
const size_t size = 1024;
one = kmalloc(size, GFP_KERNEL);
two = kmalloc(size, GFP_KERNEL);
if (!one || !two) {
pr_warn("Failed to allocate kernel memory\n");
goto free_kernel;
}
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_kernel;
}
memset(one, 'A', size);
memset(two, 'B', size);
if (to_user) {
pr_info("attempting good copy_to_user of correct size\n");
if (copy_to_user((void __user *)user_addr, one, size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of too large size\n");
if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user of correct size\n");
if (copy_from_user(one, (void __user *)user_addr, size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of too large size\n");
if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
kfree(one);
kfree(two);
}
static void do_usercopy_heap_flag(bool to_user)
{
unsigned long user_addr;
unsigned char *good_buf = NULL;
unsigned char *bad_buf = NULL;
/* Make sure cache was prepared. */
if (!bad_cache) {
pr_warn("Failed to allocate kernel cache\n");
return;
}
/*
* Allocate one buffer from each cache (kmalloc will have the
* SLAB_USERCOPY flag already, but "bad_cache" won't).
*/
good_buf = kmalloc(cache_size, GFP_KERNEL);
bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
if (!good_buf || !bad_buf) {
pr_warn("Failed to allocate buffers from caches\n");
goto free_alloc;
}
/* Allocate user memory we'll poke at. */
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_alloc;
}
memset(good_buf, 'A', cache_size);
memset(bad_buf, 'B', cache_size);
if (to_user) {
pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
if (copy_to_user((void __user *)user_addr, good_buf,
cache_size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
if (copy_to_user((void __user *)user_addr, bad_buf,
cache_size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
if (copy_from_user(good_buf, (void __user *)user_addr,
cache_size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
if (copy_from_user(bad_buf, (void __user *)user_addr,
cache_size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
if (bad_buf)
kmem_cache_free(bad_cache, bad_buf);
kfree(good_buf);
}
/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
do_usercopy_heap_size(true);
}
void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
do_usercopy_heap_size(false);
}
void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
do_usercopy_heap_flag(true);
}
void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
do_usercopy_heap_flag(false);
}
void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
do_usercopy_stack(true, true);
}
void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
do_usercopy_stack(false, true);
}
void lkdtm_USERCOPY_STACK_BEYOND(void)
{
do_usercopy_stack(true, false);
}
void lkdtm_USERCOPY_KERNEL(void)
{
unsigned long user_addr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
pr_info("attempting good copy_to_user from kernel rodata\n");
if (copy_to_user((void __user *)user_addr, test_text,
sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user from kernel text\n");
if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
0, NULL);
}
void __exit lkdtm_usercopy_exit(void)
{
kmem_cache_destroy(bad_cache);
}
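These tests only make sense against the hardened usercopy checks merged in the same cycle: copy_to_user()/copy_from_user() are expected to verify that a kernel buffer lies entirely within one sane object (a whitelisted slab object, the current stack, and so on) before copying, and to Oops on the "bad" cases above. A conceptual sketch of the stack-bounds part of such a check (an illustration of the idea, not the kernel's actual __check_object_size() code):

#include <linux/sched.h>
#include <linux/thread_info.h>

/* Reject copies that start outside, or run past the end of, the current
 * task's stack; the USERCOPY_STACK_* tests expect something like this to
 * catch their "distant stack" and "beyond stack" pointers. */
static inline bool sketch_stack_copy_ok(const void *ptr, unsigned long n)
{
	unsigned long lo = (unsigned long)task_stack_page(current);
	unsigned long hi = lo + THREAD_SIZE;
	unsigned long p  = (unsigned long)ptr;

	return p >= lo && p + n <= hi;
}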


@ -132,6 +132,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
hdr->length = length;
hdr->msg_complete = 1;
hdr->reserved = 0;
hdr->internal = 0;
}
/**
@ -165,15 +166,15 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
* Return: 0 on success, <0 on failure.
*/
static inline
int mei_hbm_cl_write(struct mei_device *dev,
struct mei_cl *cl, u8 hbm_cmd, size_t len)
int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
u8 hbm_cmd, u8 *buf, size_t len)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct mei_msg_hdr mei_hdr;
mei_hbm_hdr(mei_hdr, len);
mei_hbm_cl_hdr(cl, hbm_cmd, dev->wr_msg.data, len);
mei_hbm_hdr(&mei_hdr, len);
mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
return mei_write_message(dev, &mei_hdr, buf);
}
/**
@ -250,24 +251,23 @@ int mei_hbm_start_wait(struct mei_device *dev)
*/
int mei_hbm_start_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_version_request *start_req;
struct mei_msg_hdr mei_hdr;
struct hbm_host_version_request start_req;
const size_t len = sizeof(struct hbm_host_version_request);
int ret;
mei_hbm_reset(dev);
mei_hbm_hdr(mei_hdr, len);
mei_hbm_hdr(&mei_hdr, len);
/* host start message */
start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
memset(start_req, 0, len);
start_req->hbm_cmd = HOST_START_REQ_CMD;
start_req->host_version.major_version = HBM_MAJOR_VERSION;
start_req->host_version.minor_version = HBM_MINOR_VERSION;
memset(&start_req, 0, len);
start_req.hbm_cmd = HOST_START_REQ_CMD;
start_req.host_version.major_version = HBM_MAJOR_VERSION;
start_req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
ret = mei_write_message(dev, &mei_hdr, &start_req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@ -288,23 +288,22 @@ int mei_hbm_start_req(struct mei_device *dev)
*/
static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_enum_request *enum_req;
struct mei_msg_hdr mei_hdr;
struct hbm_host_enum_request enum_req;
const size_t len = sizeof(struct hbm_host_enum_request);
int ret;
/* enumerate clients */
mei_hbm_hdr(mei_hdr, len);
mei_hbm_hdr(&mei_hdr, len);
enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
memset(enum_req, 0, len);
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
enum_req->flags |= dev->hbm_f_dc_supported ?
MEI_HBM_ENUM_F_ALLOW_ADD : 0;
enum_req->flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
memset(&enum_req, 0, len);
enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
enum_req.flags |= dev->hbm_f_dc_supported ?
MEI_HBM_ENUM_F_ALLOW_ADD : 0;
enum_req.flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
ret = mei_write_message(dev, &mei_hdr, &enum_req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@ -358,23 +357,21 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
*/
static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_add_client_response *resp;
struct mei_msg_hdr mei_hdr;
struct hbm_add_client_response resp;
const size_t len = sizeof(struct hbm_add_client_response);
int ret;
dev_dbg(dev->dev, "adding client response\n");
resp = (struct hbm_add_client_response *)dev->wr_msg.data;
mei_hbm_hdr(&mei_hdr, len);
mei_hbm_hdr(mei_hdr, len);
memset(resp, 0, sizeof(struct hbm_add_client_response));
memset(&resp, 0, sizeof(struct hbm_add_client_response));
resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
resp.me_addr = addr;
resp.status = status;
resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
resp->me_addr = addr;
resp->status = status;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
ret = mei_write_message(dev, &mei_hdr, &resp);
if (ret)
dev_err(dev->dev, "add client response write failed: ret = %d\n",
ret);
@ -421,18 +418,17 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_cl *cl, u8 start)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_notification_request *req;
struct mei_msg_hdr mei_hdr;
struct hbm_notification_request req;
const size_t len = sizeof(struct hbm_notification_request);
int ret;
mei_hbm_hdr(mei_hdr, len);
mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
mei_hbm_hdr(&mei_hdr, len);
mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
req = (struct hbm_notification_request *)dev->wr_msg.data;
req->start = start;
req.start = start;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
ret = mei_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
@ -534,8 +530,8 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
*/
static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_props_request *prop_req;
struct mei_msg_hdr mei_hdr;
struct hbm_props_request prop_req;
const size_t len = sizeof(struct hbm_props_request);
unsigned long addr;
int ret;
@ -550,15 +546,14 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
return 0;
}
mei_hbm_hdr(mei_hdr, len);
prop_req = (struct hbm_props_request *)dev->wr_msg.data;
mei_hbm_hdr(&mei_hdr, len);
memset(prop_req, 0, sizeof(struct hbm_props_request));
memset(&prop_req, 0, sizeof(struct hbm_props_request));
prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req->me_addr = addr;
prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req.me_addr = addr;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
ret = mei_write_message(dev, &mei_hdr, &prop_req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@ -581,21 +576,20 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
*/
int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_power_gate *req;
struct mei_msg_hdr mei_hdr;
struct hbm_power_gate req;
const size_t len = sizeof(struct hbm_power_gate);
int ret;
if (!dev->hbm_f_pg_supported)
return -EOPNOTSUPP;
mei_hbm_hdr(mei_hdr, len);
mei_hbm_hdr(&mei_hdr, len);
req = (struct hbm_power_gate *)dev->wr_msg.data;
memset(req, 0, len);
req->hbm_cmd = pg_cmd;
memset(&req, 0, len);
req.hbm_cmd = pg_cmd;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
ret = mei_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "power gate command write failed.\n");
return ret;
@ -611,18 +605,17 @@ EXPORT_SYMBOL_GPL(mei_hbm_pg);
*/
static int mei_hbm_stop_req(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_host_stop_request *req =
(struct hbm_host_stop_request *)dev->wr_msg.data;
struct mei_msg_hdr mei_hdr;
struct hbm_host_stop_request req;
const size_t len = sizeof(struct hbm_host_stop_request);
mei_hbm_hdr(mei_hdr, len);
mei_hbm_hdr(&mei_hdr, len);
memset(req, 0, len);
req->hbm_cmd = HOST_STOP_REQ_CMD;
req->reason = DRIVER_STOP_REQUEST;
memset(&req, 0, len);
req.hbm_cmd = HOST_STOP_REQ_CMD;
req.reason = DRIVER_STOP_REQUEST;
return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
return mei_write_message(dev, &mei_hdr, &req);
}
/**
@ -636,9 +629,10 @@ static int mei_hbm_stop_req(struct mei_device *dev)
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_flow_control);
u8 buf[len];
cl_dbg(dev, cl, "sending flow control\n");
return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, len);
return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, buf, len);
}
/**
@ -714,8 +708,9 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_request);
u8 buf[len];
return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, len);
return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, buf, len);
}
/**
@ -729,8 +724,9 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_response);
u8 buf[len];
return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, len);
return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, buf, len);
}
/**
@ -765,8 +761,9 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_request);
u8 buf[len];
return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, len);
return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, buf, len);
}
/**


@ -382,7 +382,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
*
* @hbuf_depth : depth of hardware host/write buffer in slots
* @hbuf_is_ready : query if the host/write buffer is ready
* @wr_msg : the buffer for hbm control messages
*
* @version : HBM protocol version in use
* @hbm_f_pg_supported : hbm feature pgi protocol
@ -467,12 +466,6 @@ struct mei_device {
u8 hbuf_depth;
bool hbuf_is_ready;
/* used for control messages */
struct {
struct mei_msg_hdr hdr;
unsigned char data[128];
} wr_msg;
struct hbm_version version;
unsigned int hbm_f_pg_supported:1;
unsigned int hbm_f_dc_supported:1;
@ -670,8 +663,7 @@ static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
}
static inline int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *hdr,
unsigned char *buf)
struct mei_msg_hdr *hdr, void *buf)
{
return dev->ops->write(dev, hdr, buf);
}
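With dev->wr_msg gone, every HBM request now builds its header and payload on the caller's stack and hands both to mei_write_message(), whose buffer argument is widened to void * above so any request struct can be passed without casts. The resulting pattern, consolidated from the hbm.c hunks (an illustrative restatement, not a new function in the tree):

static int example_hbm_stop(struct mei_device *dev)
{
	struct mei_msg_hdr mei_hdr;
	struct hbm_host_stop_request req;

	mei_hbm_hdr(&mei_hdr, sizeof(req));

	memset(&req, 0, sizeof(req));
	req.hbm_cmd = HOST_STOP_REQ_CMD;
	req.reason = DRIVER_STOP_REQUEST;

	return mei_write_message(dev, &mei_hdr, &req);
}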


@ -15,7 +15,8 @@ if NVMEM
config NVMEM_IMX_OCOTP
tristate "i.MX6 On-Chip OTP Controller support"
depends on SOC_IMX6
depends on SOC_IMX6 || COMPILE_TEST
depends on HAS_IOMEM
help
This is a driver for the On-Chip OTP Controller (OCOTP) available on
i.MX6 SoCs, providing access to 4 Kbits of one-time programmable
@ -50,7 +51,6 @@ config MTK_EFUSE
tristate "Mediatek SoCs EFUSE support"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
select REGMAP_MMIO
help
This is a driver to access hardware related data like sensor
calibration, HDMI impedance etc.


@ -15,6 +15,7 @@
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
@ -26,6 +27,7 @@
struct ocotp_priv {
struct device *dev;
struct clk *clk;
void __iomem *base;
unsigned int nregs;
};
@ -36,7 +38,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
struct ocotp_priv *priv = context;
unsigned int count;
u32 *buf = val;
int i;
int i, ret;
u32 index;
index = offset >> 2;
@ -45,9 +47,16 @@ static int imx_ocotp_read(void *context, unsigned int offset,
if (count > (priv->nregs - index))
count = priv->nregs - index;
ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
return ret;
}
for (i = index; i < (index + count); i++)
*buf++ = readl(priv->base + 0x400 + i * 0x10);
clk_disable_unprepare(priv->clk);
return 0;
}
@ -85,8 +94,12 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
of_id = of_match_device(imx_ocotp_dt_ids, dev);
priv->nregs = (unsigned int)of_id->data;
priv->nregs = (unsigned long)of_id->data;
imx_ocotp_nvmem_config.size = 4 * priv->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;


@ -14,15 +14,35 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
static struct regmap_config mtk_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
*val++ = readl(base + reg + (i++ * 4));
return 0;
}
static int mtk_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
writel(*val++, base + reg + (i++ * 4));
return 0;
}
static int mtk_efuse_probe(struct platform_device *pdev)
{
@ -30,7 +50,6 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
struct regmap *regmap;
void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -42,14 +61,12 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (!econfig)
return -ENOMEM;
mtk_regmap_config.max_register = resource_size(res) - 1;
regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
if (IS_ERR(regmap)) {
dev_err(dev, "regmap init failed\n");
return PTR_ERR(regmap);
}
econfig->stride = 4;
econfig->word_size = 4;
econfig->reg_read = mtk_reg_read;
econfig->reg_write = mtk_reg_write;
econfig->size = resource_size(res);
econfig->priv = base;
econfig->dev = dev;
econfig->owner = THIS_MODULE;
nvmem = nvmem_register(econfig);
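Both OTP/eFUSE drivers touched in this series drop their private regmap and instead hand the nvmem core raw accessors through nvmem_config. The callback shape they implement is recalled here from include/linux/nvmem-provider.h, so treat the typedef names as an assumption; the registration mirrors the mtk-efuse probe above:

/* Assumed provider-side callback types (stated from memory): */
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
				void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
				 void *val, size_t bytes);

/* Registration then only needs the callbacks, a priv pointer and the
 * geometry; .priv, .size and .dev are filled in at probe time. */
static struct nvmem_config example_config = {
	.name      = "example-otp",
	.owner     = THIS_MODULE,
	.stride    = 4,
	.word_size = 4,
	.reg_read  = mtk_reg_read,	/* raw accessor from the hunk above */
	.reg_write = mtk_reg_write,
};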


@ -25,7 +25,6 @@
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/stmp_device.h>
@ -66,11 +65,10 @@ static int mxs_ocotp_wait(struct mxs_ocotp *otp)
return 0;
}
static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
void *val, size_t val_size)
static int mxs_ocotp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct mxs_ocotp *otp = context;
unsigned int offset = *(u32 *)reg;
u32 *buf = val;
int ret;
@ -94,17 +92,16 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
if (ret)
goto close_banks;
while (val_size >= reg_size) {
while (bytes) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
*buf = 0;
*buf++ = 0;
} else {
*buf = readl(otp->base + offset);
*buf++ = readl(otp->base + offset);
}
buf++;
val_size -= reg_size;
offset += reg_size;
bytes -= 4;
offset += 4;
}
close_banks:
@ -117,57 +114,29 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
return ret;
}
static int mxs_ocotp_write(void *context, const void *data, size_t count)
{
/* We don't want to support writing */
return 0;
}
static bool mxs_ocotp_writeable_reg(struct device *dev, unsigned int reg)
{
return false;
}
static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
.stride = 16,
.word_size = 4,
.owner = THIS_MODULE,
.reg_read = mxs_ocotp_read,
};
static const struct regmap_range imx23_ranges[] = {
regmap_reg_range(OCOTP_DATA_OFFSET, 0x210),
struct mxs_data {
int size;
};
static const struct regmap_access_table imx23_access = {
.yes_ranges = imx23_ranges,
.n_yes_ranges = ARRAY_SIZE(imx23_ranges),
static const struct mxs_data imx23_data = {
.size = 0x220,
};
static const struct regmap_range imx28_ranges[] = {
regmap_reg_range(OCOTP_DATA_OFFSET, 0x290),
};
static const struct regmap_access_table imx28_access = {
.yes_ranges = imx28_ranges,
.n_yes_ranges = ARRAY_SIZE(imx28_ranges),
};
static struct regmap_bus mxs_ocotp_bus = {
.read = mxs_ocotp_read,
.write = mxs_ocotp_write, /* make regmap_init() happy */
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
static struct regmap_config mxs_ocotp_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 16,
.writeable_reg = mxs_ocotp_writeable_reg,
static const struct mxs_data imx28_data = {
.size = 0x2a0,
};
static const struct of_device_id mxs_ocotp_match[] = {
{ .compatible = "fsl,imx23-ocotp", .data = &imx23_access },
{ .compatible = "fsl,imx28-ocotp", .data = &imx28_access },
{ .compatible = "fsl,imx23-ocotp", .data = &imx23_data },
{ .compatible = "fsl,imx28-ocotp", .data = &imx28_data },
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
@ -175,11 +144,10 @@ MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
static int mxs_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct mxs_data *data;
struct mxs_ocotp *otp;
struct resource *res;
const struct of_device_id *match;
struct regmap *regmap;
const struct regmap_access_table *access;
int ret;
match = of_match_device(dev->driver->of_match_table, dev);
@ -205,17 +173,10 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
return ret;
}
access = match->data;
mxs_ocotp_config.rd_table = access;
mxs_ocotp_config.max_register = access->yes_ranges[0].range_max;
regmap = devm_regmap_init(dev, &mxs_ocotp_bus, otp, &mxs_ocotp_config);
if (IS_ERR(regmap)) {
dev_err(dev, "regmap init failed\n");
ret = PTR_ERR(regmap);
goto err_clk;
}
data = match->data;
ocotp_config.size = data->size;
ocotp_config.priv = otp;
ocotp_config.dev = dev;
otp->nvmem = nvmem_register(&ocotp_config);
if (IS_ERR(otp->nvmem)) {


@ -129,10 +129,6 @@
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
#define AXP288_EXTCON_SLOW_CHARGER "SLOW-CHARGER"
#define AXP288_EXTCON_DOWNSTREAM_CHARGER "CHARGE-DOWNSTREAM"
#define AXP288_EXTCON_FAST_CHARGER "FAST-CHARGER"
enum {
VBUS_OV_IRQ = 0,
CHARGE_DONE_IRQ,
@ -158,7 +154,7 @@ struct axp288_chrg_info {
/* OTG/Host mode */
struct {
struct work_struct work;
struct extcon_specific_cable_nb cable;
struct extcon_dev *cable;
struct notifier_block id_nb;
bool id_short;
} otg;
@ -586,17 +582,15 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
bool old_connected = info->cable.connected;
/* Determine cable/charger type */
if (extcon_get_cable_state(edev, AXP288_EXTCON_SLOW_CHARGER) > 0) {
if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_SDP) > 0) {
dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
} else if (extcon_get_cable_state(edev,
AXP288_EXTCON_DOWNSTREAM_CHARGER) > 0) {
} else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_CDP) > 0) {
dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
} else if (extcon_get_cable_state(edev,
AXP288_EXTCON_FAST_CHARGER) > 0) {
} else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0) {
dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
@ -692,8 +686,8 @@ static int axp288_charger_handle_otg_evt(struct notifier_block *nb,
{
struct axp288_chrg_info *info =
container_of(nb, struct axp288_chrg_info, otg.id_nb);
struct extcon_dev *edev = param;
int usb_host = extcon_get_cable_state(edev, "USB-Host");
struct extcon_dev *edev = info->otg.cable;
int usb_host = extcon_get_cable_state_(edev, EXTCON_USB_HOST);
dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
usb_host ? "attached" : "detached");
@ -848,10 +842,33 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register for extcon notification */
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
ret = extcon_register_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
&info->cable.nb);
if (ret) {
dev_err(&info->pdev->dev,
"failed to register extcon notifier %d\n", ret);
"failed to register extcon notifier for SDP %d\n", ret);
return ret;
}
ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
&info->cable.nb);
if (ret) {
dev_err(&info->pdev->dev,
"failed to register extcon notifier for CDP %d\n", ret);
extcon_unregister_notifier(info->cable.edev,
EXTCON_CHG_USB_SDP, &info->cable.nb);
return ret;
}
ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
&info->cable.nb);
if (ret) {
dev_err(&info->pdev->dev,
"failed to register extcon notifier for DCP %d\n", ret);
extcon_unregister_notifier(info->cable.edev,
EXTCON_CHG_USB_SDP, &info->cable.nb);
extcon_unregister_notifier(info->cable.edev,
EXTCON_CHG_USB_CDP, &info->cable.nb);
return ret;
}
@ -871,14 +888,14 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register for OTG notification */
INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
ret = extcon_register_interest(&info->otg.cable, NULL, "USB-Host",
ret = extcon_register_notifier(info->otg.cable, EXTCON_USB_HOST,
&info->otg.id_nb);
if (ret)
dev_warn(&pdev->dev, "failed to register otg notifier\n");
if (info->otg.cable.edev)
info->otg.id_short = extcon_get_cable_state(
info->otg.cable.edev, "USB-Host");
if (info->otg.cable)
info->otg.id_short = extcon_get_cable_state_(
info->otg.cable, EXTCON_USB_HOST);
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
@ -905,11 +922,17 @@ static int axp288_charger_probe(struct platform_device *pdev)
return 0;
intr_reg_failed:
if (info->otg.cable.edev)
extcon_unregister_interest(&info->otg.cable);
if (info->otg.cable)
extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
&info->otg.id_nb);
power_supply_unregister(info->psy_usb);
psy_reg_failed:
extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
&info->cable.nb);
extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
&info->cable.nb);
extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
&info->cable.nb);
return ret;
}
@ -917,10 +940,16 @@ static int axp288_charger_remove(struct platform_device *pdev)
{
struct axp288_chrg_info *info = dev_get_drvdata(&pdev->dev);
if (info->otg.cable.edev)
extcon_unregister_interest(&info->otg.cable);
if (info->otg.cable)
extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
&info->otg.id_nb);
extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
&info->cable.nb);
extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
&info->cable.nb);
extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
&info->cable.nb);
power_supply_unregister(info->psy_usb);
return 0;


@ -92,7 +92,7 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
}
if (i < CHRDEV_MAJOR_DYN_END)
pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range",
pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range\n",
name, i);
if (i == 0) {


@ -126,42 +126,6 @@ struct extcon_dev {
struct device_attribute *d_attrs_muex;
};
/**
* struct extcon_cable - An internal data for each cable of extcon device.
* @edev: The extcon device
* @cable_index: Index of this cable in the edev
* @attr_g: Attribute group for the cable
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
* @attrs: Array pointing to attr_name and attr_state for attr_g
*/
struct extcon_cable {
struct extcon_dev *edev;
int cable_index;
struct attribute_group attr_g;
struct device_attribute attr_name;
struct device_attribute attr_state;
struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
};
/**
* struct extcon_specific_cable_nb - An internal data for
* extcon_register_interest().
* @user_nb: user provided notifier block for events from
* a specific cable.
* @cable_index: the target cable.
* @edev: the target extcon device.
* @previous_value: the saved previous event value.
*/
struct extcon_specific_cable_nb {
struct notifier_block *user_nb;
int cable_index;
struct extcon_dev *edev;
unsigned long previous_value;
};
#if IS_ENABLED(CONFIG_EXTCON)
/*
@ -201,29 +165,12 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
/*
* get/set_cable_state access each bit of the 32b encoded state value.
* They are used to access the status of each cable based on the cable_name.
* They are used to access the status of each cable based on the cable id.
*/
extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
bool cable_state);
extern int extcon_get_cable_state(struct extcon_dev *edev,
const char *cable_name);
extern int extcon_set_cable_state(struct extcon_dev *edev,
const char *cable_name, bool cable_state);
/*
* Following APIs are for notifiees (those who want to be notified)
* to register a callback for events from a specific cable of the extcon.
* Notifiees are the connected device drivers wanting to get notified by
* a specific external port of a connection device.
*/
extern int extcon_register_interest(struct extcon_specific_cable_nb *obj,
const char *extcon_name,
const char *cable_name,
struct notifier_block *nb);
extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
/*
* Following APIs are to monitor every action of a notifier.
* Registrar gets notified for every external port of a connection device.
@ -235,6 +182,12 @@ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
extern int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
extern void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
/*
* Following API get the extcon device from devicetree.
@ -246,6 +199,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
/* Following API to get information of extcon device */
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@ -306,18 +260,6 @@ static inline int extcon_set_cable_state_(struct extcon_dev *edev,
return 0;
}
static inline int extcon_get_cable_state(struct extcon_dev *edev,
const char *cable_name)
{
return 0;
}
static inline int extcon_set_cable_state(struct extcon_dev *edev,
const char *cable_name, int state)
{
return 0;
}
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
return NULL;
@ -337,19 +279,16 @@ static inline int extcon_unregister_notifier(struct extcon_dev *edev,
return 0;
}
static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
const char *extcon_name,
const char *cable_name,
struct notifier_block *nb)
static inline int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
return 0;
return -ENOSYS;
}
static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
*obj)
{
return 0;
}
static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index)
@ -357,4 +296,28 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_EXTCON */
/*
* The following structure and APIs are deprecated. They are kept only as
* stubs so that remaining users keep building.
*/
struct extcon_specific_cable_nb {
struct notifier_block *user_nb;
int cable_index;
struct extcon_dev *edev;
unsigned long previous_value;
};
static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
const char *extcon_name, const char *cable_name,
struct notifier_block *nb)
{
return -EINVAL;
}
static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
*obj)
{
return -EINVAL;
}
#endif /* __LINUX_EXTCON_H__ */
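The new devm_extcon_register_notifier()/devm_extcon_unregister_notifier() pair lets consumers such as the axp288 charger above drop their hand-rolled unregister chains: the notifier is torn down automatically when the consuming device is unbound. A minimal usage sketch for a probe path (the driver name and callback below are illustrative, not from the tree):

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>

static int example_evt(struct notifier_block *nb, unsigned long event,
		       void *ptr)
{
	/* react to the cable state change here */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = { .notifier_call = example_evt };

static int example_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* Unregistered automatically on driver unbind. */
	return devm_extcon_register_notifier(&pdev->dev, edev,
					     EXTCON_CHG_USB_SDP, &example_nb);
}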


@ -53,6 +53,7 @@ struct adc_jack_cond {
* milli-seconds after the interrupt occurs. You may
* describe such delays with @handling_delay_ms, which
* is rounded-off by jiffies.
* @wakeup_source: flag to wake up the system for extcon events.
*/
struct adc_jack_pdata {
const char *name;
@ -65,6 +66,7 @@ struct adc_jack_pdata {
unsigned long irq_flags;
unsigned long handling_delay_ms; /* in ms */
bool wakeup_source;
};
#endif /* _EXTCON_ADC_JACK_H */


@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
{
}
static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
return ERR_PTR(-ENOSYS);
}
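The stub fix above makes the !CONFIG_NVMEM fallback return the same void * as the real nvmem_cell_read(), which hands back a freshly allocated buffer (or an ERR_PTR) and reports its length through *len, so consumer code compiles identically either way. A hedged consumer-side sketch ("calib" is an illustrative cell name):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "calib");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);	/* allocated data or ERR_PTR */
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... consume len bytes at buf ... */
	kfree(buf);
	return 0;
}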