/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */
|
2017-03-26 07:33:21 +08:00
|
|
|
#include <linux/uaccess.h>
|
2016-07-14 08:18:57 +08:00
|
|
|
#include <linux/export.h>
|
2011-06-07 17:49:55 +08:00
|
|
|
|
|
|
|
/*
|
2013-10-24 18:52:06 +08:00
|
|
|
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
|
|
|
|
* nested NMI paths are careful to preserve CR2.
|
2011-06-07 17:49:55 +08:00
|
|
|
*/
|
|
|
|
unsigned long
|
|
|
|
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
|
|
|
|
{
|
2013-10-24 18:52:06 +08:00
|
|
|
unsigned long ret;
|
2011-06-07 17:49:55 +08:00
|
|
|
|
2012-06-11 21:44:26 +08:00
|
|
|
if (__range_not_ok(from, n, TASK_SIZE))
|
2015-06-23 03:38:43 +08:00
|
|
|
return n;
|
2013-10-24 18:52:06 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Even though this function is typically called from NMI/IRQ context
|
|
|
|
* disable pagefaults so that its behaviour is consistent even when
|
|
|
|
* called form other contexts.
|
|
|
|
*/
|
|
|
|
pagefault_disable();
|
|
|
|
ret = __copy_from_user_inatomic(to, from, n);
|
|
|
|
pagefault_enable();
|
|
|
|
|
2013-10-31 04:16:22 +08:00
|
|
|
return ret;
|
2011-06-07 17:49:55 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
|