ntfs_copy_from_user_iter(): don't bother with copying iov_iter
Advance the original, let the caller revert if it needs to. Don't mess with iov_iter_single_seg_count() in the caller - if we got a (non-zero) short copy, use the amount actually copied for the next pass; limit it to "up to the end of page" if nothing got copied at all.

Originally fault-in only read the first iovec; back then it used to make sense to limit to just the one iovec for the pass after a short copy. These days that's no longer true.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit 9067931236
parent 6efb943b86
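For context, here is a minimal userspace sketch of the contract the message describes: the copy helper advances the caller's iterator as it goes, and the caller backs it up on failure. The struct and function names are illustrative stand-ins, not the kernel's iov_iter API.

#include <stddef.h>
#include <string.h>

/* Illustrative stand-in for struct iov_iter: a single flat buffer. */
struct iter {
	const char *buf;
	size_t pos, count;
};

/*
 * Copy up to @len bytes and advance the iterator by the amount
 * actually copied - the pattern ntfs_copy_from_user_iter() follows
 * after this patch, instead of advancing a private copy of the
 * iterator and leaving the original untouched.
 */
static size_t copy_and_advance(char *dst, struct iter *it, size_t len)
{
	size_t n = it->count - it->pos;

	if (n > len)
		n = len;
	memcpy(dst, it->buf + it->pos, n);
	it->pos += n;
	return n;
}

/*
 * Caller-side error path, analogous to iov_iter_revert(): back the
 * iterator up over a copy the caller decided to discard.
 */
static void iter_revert(struct iter *it, size_t n)
{
	it->pos -= n;
}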
fs/ntfs/file.c

@@ -1684,20 +1684,19 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
 {
 	struct page **last_page = pages + nr_pages;
 	size_t total = 0;
-	struct iov_iter data = *i;
 	unsigned len, copied;
 
 	do {
 		len = PAGE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+		copied = iov_iter_copy_from_user_atomic(*pages, i, ofs,
 				len);
+		iov_iter_advance(i, copied);
 		total += copied;
 		bytes -= copied;
 		if (!bytes)
 			break;
-		iov_iter_advance(&data, copied);
 		if (copied < len)
 			goto err;
 		ofs = 0;
@@ -1866,34 +1865,24 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
 		if (likely(copied == bytes)) {
 			status = ntfs_commit_pages_after_write(pages, do_pages,
 					pos, bytes);
-			if (!status)
-				status = bytes;
 		}
 		do {
 			unlock_page(pages[--do_pages]);
 			put_page(pages[do_pages]);
 		} while (do_pages);
-		if (unlikely(status < 0))
+		if (unlikely(status < 0)) {
+			iov_iter_revert(i, copied);
 			break;
-		copied = status;
+		}
 		cond_resched();
-		if (unlikely(!copied)) {
-			size_t sc;
-
-			/*
-			 * We failed to copy anything. Fall back to single
-			 * segment length write.
-			 *
-			 * This is needed to avoid possible livelock in the
-			 * case that all segments in the iov cannot be copied
-			 * at once without a pagefault.
-			 */
-			sc = iov_iter_single_seg_count(i);
-			if (bytes > sc)
-				bytes = sc;
+		if (unlikely(copied < bytes)) {
+			iov_iter_revert(i, copied);
+			if (copied)
+				bytes = copied;
+			else if (bytes > PAGE_SIZE - ofs)
+				bytes = PAGE_SIZE - ofs;
 			goto again;
 		}
-		iov_iter_advance(i, copied);
 		pos += copied;
 		written += copied;
 		balance_dirty_pages_ratelimited(mapping);
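To see why the new caller-side policy makes progress, here is a small standalone simulation of the retry loop in ntfs_perform_write(). flaky_copy() is a made-up stand-in for an atomic copy that may come up short; the bytes/ofs handling mirrors the patched code.

#include <stdio.h>

#define PAGE_SIZE 4096

/*
 * Made-up stand-in for the atomic copy: the first attempt "faults"
 * after 1300 bytes, every later attempt copies all it is asked for.
 */
static size_t flaky_copy(size_t want, int *attempt)
{
	if ((*attempt)++ == 0 && want > 1300)
		return 1300;
	return want;
}

int main(void)
{
	size_t bytes = PAGE_SIZE, ofs = 0, copied;
	int attempt = 0;

again:
	copied = flaky_copy(bytes, &attempt);
	printf("asked for %zu, copied %zu\n", bytes, copied);
	if (copied < bytes) {
		/* the kernel would iov_iter_revert(i, copied) here */
		if (copied)
			bytes = copied;		/* retry exactly what did copy */
		else if (bytes > PAGE_SIZE - ofs)
			bytes = PAGE_SIZE - ofs; /* cap to the end of the page */
		goto again;
	}
	return 0;
}

Either branch keeps the next request no larger than a region the caller can fault in before retrying, which is the same livelock guard the deleted iov_iter_single_seg_count() fallback provided, without capping the pass to a single iovec.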