generic_perform_write()/iomap_write_actor(): saner logics for short copy
If we run into a short copy and ->write_end() refuses to advance at all, use the amount we'd managed to copy for the next iteration to handle.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 9067931236
commit bc1bb416bb
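The change the diff below makes is easiest to see in isolation: when ->write_end() returns 0 after a short atomic usercopy, the loop no longer clamps the retry to a single iov segment; it retries with exactly the number of bytes the usercopy did manage, and the iterator is only advanced once ->write_end() has accepted something. Here is a minimal user-space sketch of that loop shape; copy_atomic(), commit_write() and perform_write() are invented stand-ins for iov_iter_copy_from_user_atomic(), ->write_end() and generic_perform_write(), not the kernel code itself:

#include <stdio.h>
#include <string.h>

/* Stands in for iov_iter_copy_from_user_atomic(): may copy less than asked. */
static size_t copy_atomic(char *dst, const char *src, size_t bytes)
{
        static int faulted;

        if (!faulted) {         /* simulate one mid-copy fault */
                faulted = 1;
                bytes /= 2;
        }
        memcpy(dst, src, bytes);
        return bytes;
}

/* Stands in for ->write_end(): rejects a short copy outright. */
static size_t commit_write(size_t bytes, size_t copied)
{
        return copied == bytes ? copied : 0;
}

static size_t perform_write(char *dst, const char *src, size_t len)
{
        size_t written = 0;

        while (len) {
                size_t bytes = len;     /* the kernel clamps this to a page */
                size_t copied, status;
again:
                /* the kernel re-faults the source page here before retrying */
                copied = copy_atomic(dst + written, src + written, bytes);
                status = commit_write(bytes, copied);
                if (status == 0) {
                        /*
                         * Short copy rejected entirely: retry with only
                         * the amount we actually managed to copy.
                         */
                        if (copied)
                                bytes = copied;
                        goto again;
                }
                copied = status;
                written += copied;
                len -= copied;
        }
        return written;
}

int main(void)
{
        const char src[] = "hello, short copies";
        char dst[sizeof(src)] = "";

        printf("wrote %zu bytes: \"%s\"\n",
               perform_write(dst, src, sizeof(src) - 1), dst);
        return 0;
}

The real loop also re-faults the source user page (iov_iter_fault_in_readable()) at the top of each retry, which is what rules out a livelock when copied is 0; that step is elided from the sketch.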
fs/iomap/buffered-io.c

@@ -771,10 +771,6 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
-                *
-                * Not only is this an optimisation, but it is also required
-                * to check that the address is actually valid, when atomic
-                * usercopies are used, below.
                 */
                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                        status = -EFAULT;
@@ -791,25 +787,24 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,

                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

-               copied = iomap_write_end(inode, pos, bytes, copied, page, iomap,
+               status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
                                srcmap);

                cond_resched();

-               iov_iter_advance(i, copied);
-               if (unlikely(copied == 0)) {
+               if (unlikely(status == 0)) {
                        /*
-                        * If we were unable to copy any data at all, we must
-                        * fall back to a single segment length write.
-                        *
-                        * If we didn't fallback here, we could livelock
-                        * because not all segments in the iov can be copied at
-                        * once without a pagefault.
+                        * A short copy made iomap_write_end() reject the
+                        * thing entirely.  Might be memory poisoning
+                        * halfway through, might be a race with munmap,
+                        * might be severe memory pressure.
                         */
-                       bytes = min_t(unsigned long, PAGE_SIZE - offset,
-                                     iov_iter_single_seg_count(i));
+                       if (copied)
+                               bytes = copied;
                        goto again;
                }
+               copied = status;
+               iov_iter_advance(i, copied);
                pos += copied;
                written += copied;
                length -= copied;
mm/filemap.c (24 lines changed)
@@ -3642,10 +3642,6 @@ ssize_t generic_perform_write(struct file *file,
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
-                *
-                * Not only is this an optimisation, but it is also required
-                * to check that the address is actually valid, when atomic
-                * usercopies are used, below.
                 */
                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                        status = -EFAULT;
@@ -3672,24 +3668,22 @@ ssize_t generic_perform_write(struct file *file,
                                                page, fsdata);
                if (unlikely(status < 0))
                        break;
-               copied = status;

                cond_resched();

-               iov_iter_advance(i, copied);
-               if (unlikely(copied == 0)) {
+               if (unlikely(status == 0)) {
                        /*
-                        * If we were unable to copy any data at all, we must
-                        * fall back to a single segment length write.
-                        *
-                        * If we didn't fallback here, we could livelock
-                        * because not all segments in the iov can be copied at
-                        * once without a pagefault.
+                        * A short copy made ->write_end() reject the
+                        * thing entirely.  Might be memory poisoning
+                        * halfway through, might be a race with munmap,
+                        * might be severe memory pressure.
                         */
-                       bytes = min_t(unsigned long, PAGE_SIZE - offset,
-                                     iov_iter_single_seg_count(i));
+                       if (copied)
+                               bytes = copied;
                        goto again;
                }
+               copied = status;
+               iov_iter_advance(i, copied);
                pos += copied;
                written += copied;