UPSTREAM: iov_iter: Turn iov_iter_fault_in_readable into fault_in_iov_iter_readable
commit a6294593e8a1290091d0b078d5d33da5e0cd3dfe upstream
Turn iov_iter_fault_in_readable into a function that returns the number
of bytes not faulted in, similar to copy_to_user, instead of returning a
non-zero value when any of the requested pages couldn't be faulted in.
This supports the existing users that require all pages to be faulted in
as well as new users that are happy if any pages can be faulted in.
Rename iov_iter_fault_in_readable to fault_in_iov_iter_readable to make
sure this change doesn't silently break things.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 30e66b1dfc)
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Idc687713cf0585664c6eae5a075b3b067ba76b5e
This commit is contained in: parent d9fb814064, commit e1c331f4ec
|
@ -1709,7 +1709,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
|
|||
* Fault pages before locking them in prepare_pages
|
||||
* to avoid recursive lock
|
||||
*/
|
||||
if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
|
||||
if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -4414,7 +4414,7 @@ static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
|
|||
return 0;
|
||||
|
||||
/* If it will be a short write, don't bother. */
|
||||
if (iov_iter_fault_in_readable(iter, count))
|
||||
if (fault_in_iov_iter_readable(iter, count))
|
||||
return 0;
|
||||
|
||||
if (f2fs_has_inline_data(inode)) {
|
||||
|
|
|
@ -1166,7 +1166,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
|
|||
|
||||
again:
|
||||
err = -EFAULT;
|
||||
if (iov_iter_fault_in_readable(ii, bytes))
|
||||
if (fault_in_iov_iter_readable(ii, bytes))
|
||||
break;
|
||||
|
||||
err = -ENOMEM;
|
||||
|
|
|
@ -757,7 +757,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
|
|||
* same page as we're writing to, without it being marked
|
||||
* up-to-date.
|
||||
*/
|
||||
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
|
||||
if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
|
||||
status = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -1829,7 +1829,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
|
|||
* pages being swapped out between us bringing them into memory
|
||||
* and doing the actual copying.
|
||||
*/
|
||||
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
|
||||
if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
|
||||
status = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -989,7 +989,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
|
|||
frame_vbo = pos & ~(frame_size - 1);
|
||||
index = frame_vbo >> PAGE_SHIFT;
|
||||
|
||||
if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
|
||||
if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
|
||||
err = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -133,7 +133,7 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
|
|||
size_t bytes, struct iov_iter *i);
|
||||
void iov_iter_advance(struct iov_iter *i, size_t bytes);
|
||||
void iov_iter_revert(struct iov_iter *i, size_t bytes);
|
||||
int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes);
|
||||
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
|
||||
size_t iov_iter_single_seg_count(const struct iov_iter *i);
|
||||
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
|
||||
struct iov_iter *i);
|
||||
|
|
|
@ -431,33 +431,42 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
|
|||
}
|
||||
|
||||
/*
|
||||
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
|
||||
* bytes. For each iovec, fault in each page that constitutes the iovec.
|
||||
* fault_in_iov_iter_readable - fault in iov iterator for reading
|
||||
* @i: iterator
|
||||
* @size: maximum length
|
||||
*
|
||||
* Return 0 on success, or non-zero if the memory could not be accessed (i.e.
|
||||
* because it is an invalid address).
|
||||
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
|
||||
* @size. For each iovec, fault in each page that constitutes the iovec.
|
||||
*
|
||||
* Returns the number of bytes not faulted in (like copy_to_user() and
|
||||
* copy_from_user()).
|
||||
*
|
||||
* Always returns 0 for non-userspace iterators.
|
||||
*/
|
||||
int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
|
||||
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
|
||||
{
|
||||
if (iter_is_iovec(i)) {
|
||||
size_t count = min(size, iov_iter_count(i));
|
||||
const struct iovec *p;
|
||||
size_t skip;
|
||||
|
||||
if (bytes > i->count)
|
||||
bytes = i->count;
|
||||
for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
|
||||
size_t len = min(bytes, p->iov_len - skip);
|
||||
size -= count;
|
||||
for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
|
||||
size_t len = min(count, p->iov_len - skip);
|
||||
size_t ret;
|
||||
|
||||
if (unlikely(!len))
|
||||
continue;
|
||||
if (fault_in_readable(p->iov_base + skip, len))
|
||||
return -EFAULT;
|
||||
bytes -= len;
|
||||
ret = fault_in_readable(p->iov_base + skip, len);
|
||||
count -= len - ret;
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
return count + size;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(iov_iter_fault_in_readable);
|
||||
EXPORT_SYMBOL(fault_in_iov_iter_readable);
|
||||
|
||||
void iov_iter_init(struct iov_iter *i, unsigned int direction,
|
||||
const struct iovec *iov, unsigned long nr_segs,
|
||||
|
|
|
@ -3810,7 +3810,7 @@ ssize_t generic_perform_write(struct file *file,
|
|||
* same page as we're writing to, without it being marked
|
||||
* up-to-date.
|
||||
*/
|
||||
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
|
||||
if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
|
||||
status = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue