get rid of separate multipage fault-in primitives
* the only remaining callers of the "short" fault-ins are just as happy
  with the generic variants (both in lib/iov_iter.c); switch them to the
  multipage variants and kill the "short" ones
* rename the multipage variants to the now-available plain names
* get rid of the compat macro defining iov_iter_fault_in_multipages_readable
  by expanding it in its only user

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
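For context, the i915 hunks below all share one prefault-then-lock shape:
drop the lock, fault the whole destination range in with the (now renamed)
fault_in_pages_writeable(), retake the lock, then do the real copy. A minimal
hypothetical sketch of that shape, not taken from this patch (my_dev, my_lock,
my_dev_pread and my_copy_out are invented names):

/* Hypothetical sketch of the prefault pattern used in the i915 paths below. */
static int my_dev_pread(struct my_dev *dev, char __user *user_data, int remain)
{
	int ret;

	mutex_unlock(&dev->my_lock);		/* faulting may sleep, so drop the lock */
	ret = fault_in_pages_writeable(user_data, remain);
	mutex_lock(&dev->my_lock);
	if (ret)
		return ret;

	return my_copy_out(dev, user_data, remain);	/* hypothetical helper */
}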
@@ -387,7 +387,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (!access_ok(VERIFY_READ, ptr, args->size))
 		return -EFAULT;
 
-	ret = fault_in_multipages_readable(ptr, args->size);
+	ret = fault_in_pages_readable(ptr, args->size);
 	if (ret)
 		return ret;
 
@@ -675,7 +675,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 
 	mutex_unlock(&dev->struct_mutex);
 	if (likely(!i915.prefault_disable)) {
-		ret = fault_in_multipages_writeable(user_data, remain);
+		ret = fault_in_pages_writeable(user_data, remain);
 		if (ret) {
 			mutex_lock(&dev->struct_mutex);
 			goto out_unpin;
@@ -803,7 +803,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		mutex_unlock(&dev->struct_mutex);
 
 		if (likely(!i915.prefault_disable) && !prefaulted) {
-			ret = fault_in_multipages_writeable(user_data, remain);
+			ret = fault_in_pages_writeable(user_data, remain);
 			/* Userspace is tricking us, but we've already clobbered
 			 * its pages with the prefault and promised to write the
 			 * data up to the first fault. Hence ignore any errors
@@ -1267,7 +1267,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		return -EFAULT;
 
 	if (likely(!i915.prefault_disable)) {
-		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
+		ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
 						   args->size);
 		if (ret)
 			return -EFAULT;
@@ -1048,7 +1048,7 @@ validate_exec_list(struct drm_device *dev,
 			return -EFAULT;
 
 		if (likely(!i915.prefault_disable)) {
-			if (fault_in_multipages_readable(ptr, length))
+			if (fault_in_pages_readable(ptr, length))
 				return -EFAULT;
 		}
 	}
@@ -1850,7 +1850,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
 		 * pages being swapped out between us bringing them into memory
 		 * and doing the actual copying.
 		 */
-		if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) {
+		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 			status = -EFAULT;
 			break;
 		}
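The ntfs hunk above shows the usual buffered-write shape: fault the next chunk
of the source iov_iter in before the copy, because the copy itself runs under
page locks (typically with pagefaults disabled), where taking a fault would
deadlock. A minimal hypothetical sketch of that loop, not taken from this
patch (my_perform_write() and my_copy_chunk() are invented names):

static ssize_t my_perform_write(struct iov_iter *i, loff_t pos)
{
	ssize_t written = 0;
	ssize_t status = 0;

	do {
		size_t bytes = min_t(size_t, iov_iter_count(i), PAGE_SIZE);

		/* Bring source pages in now; faulting during the copy would deadlock. */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = my_copy_chunk(i, pos, bytes);	/* hypothetical helper */
		if (status < 0)
			break;

		pos += status;
		written += status;
	} while (iov_iter_count(i));

	return written ? written : status;
}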
@@ -518,58 +518,9 @@ void page_endio(struct page *page, bool is_write, int err);
 extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 
 /*
- * Fault one or two userspace pages into pagetables.
- * Return -EINVAL if more than two pages would be needed.
- * Return non-zero on a fault.
+ * Fault everything in given userspace address range in.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
-{
-	int span, ret;
-
-	if (unlikely(size == 0))
-		return 0;
-
-	span = offset_in_page(uaddr) + size;
-	if (span > 2 * PAGE_SIZE)
-		return -EINVAL;
-	/*
-	 * Writing zeroes into userspace here is OK, because we know that if
-	 * the zero gets there, we'll be overwriting it.
-	 */
-	ret = __put_user(0, uaddr);
-	if (ret == 0 && span > PAGE_SIZE)
-		ret = __put_user(0, uaddr + size - 1);
-	return ret;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, int size)
-{
-	volatile char c;
-	int ret;
-
-	if (unlikely(size == 0))
-		return 0;
-
-	ret = __get_user(c, uaddr);
-	if (ret == 0) {
-		const char __user *end = uaddr + size - 1;
-
-		if (((unsigned long)uaddr & PAGE_MASK) !=
-		    ((unsigned long)end & PAGE_MASK)) {
-			ret = __get_user(c, end);
-			(void)c;
-		}
-	}
-	return ret;
-}
-
-/*
- * Multipage variants of the above prefault helpers, useful if more than
- * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
- * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
- * filemap.c hotpaths.
- */
-static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 {
 	char __user *end = uaddr + size - 1;
 
@@ -596,8 +547,7 @@ static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
 	return 0;
 }
 
-static inline int fault_in_multipages_readable(const char __user *uaddr,
-					       int size)
+static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 {
 	volatile char c;
 	const char __user *end = uaddr + size - 1;
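After the two pagemap.h hunks above there is a single
fault_in_pages_readable()/fault_in_pages_writeable() pair, and it walks the
whole range page by page, so the old limitation of the short variants (at most
two pages, -EINVAL otherwise) is gone. A hypothetical call site, not taken
from this patch (buf and len are invented names), simply prefaults the full
range:

	/* len may span arbitrarily many pages now. */
	if (fault_in_pages_readable(buf, len))
		return -EFAULT;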
@@ -76,7 +76,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
@@ -306,8 +306,7 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 
 	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 		iterate_iovec(i, bytes, v, iov, skip, ({
-			err = fault_in_multipages_readable(v.iov_base,
-							   v.iov_len);
+			err = fault_in_pages_readable(v.iov_base, v.iov_len);
 			if (unlikely(err))
 			return err;
 		0;}))