iov_iter.c: get rid of bvec_copy_page_{to,from}_iter()
Just have copy_page_{to,from}_iter() fall back to kmap_atomic() +
copy_{to,from}_iter() + kunmap_atomic() in the ITER_BVEC case. As a matter
of fact, that's what we want to do for any iov_iter kind that isn't blocking
- e.g. ITER_KVEC will also go that way once we recognize it at the
iov_iter.c primitives level.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit d271524a3a
parent 8442fa46cf
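The new copy_page_{to,from}_iter() shape is: detect an iterator kind that is not
backed by userspace memory, kmap_atomic() the page, hand the mapped address to
the generic copy_{to,from}_iter() primitive, then kunmap_atomic(). The snippet
below is a minimal userspace model of that fallback pattern, not kernel code:
struct iter_model, copy_to_iter_model() and copy_page_to_iter_model() are
hypothetical stand-ins for struct iov_iter, copy_to_iter() and
copy_page_to_iter(), and a plain byte array stands in for a struct page that
never needs mapping.

/*
 * Hypothetical userspace model of the fallback pattern described in the
 * commit message.  A "page" is just a byte array here, so mapping it is
 * implicit; the kernel brackets the copy with kmap_atomic()/kunmap_atomic().
 */
#include <stdio.h>
#include <string.h>

struct iter_model {
        char  *buf;     /* destination backing the iterator */
        size_t count;   /* bytes the iterator can still accept */
};

/* stand-in for the generic copy_to_iter() primitive */
static size_t copy_to_iter_model(const void *addr, size_t bytes,
                                 struct iter_model *i)
{
        if (bytes > i->count)
                bytes = i->count;
        memcpy(i->buf, addr, bytes);
        i->buf += bytes;
        i->count -= bytes;
        return bytes;
}

/*
 * stand-in for copy_page_to_iter() on a bvec-style iterator: no dedicated
 * per-kind page-copy helper, just "map" the page and fall back to the
 * generic primitive.  (The real function keeps a separate path,
 * copy_page_to_iter_iovec(), for userspace-backed iterators.)
 */
static size_t copy_page_to_iter_model(const char *page, size_t offset,
                                      size_t bytes, struct iter_model *i)
{
        return copy_to_iter_model(page + offset, bytes, i);
}

int main(void)
{
        char page[64] = "payload living inside a page";
        char dst[32] = { 0 };
        struct iter_model it = { dst, sizeof(dst) - 1 };
        size_t copied = copy_page_to_iter_model(page, 0, strlen(page), &it);

        printf("copied %zu bytes: \"%s\"\n", copied, dst);
        return 0;
}

main() just exercises the modelled path end to end; in the kernel, the same
fall-back-to-the-generic-primitive shape is what lets ITER_KVEC take the same
route once iov_iter.c recognizes it, as the commit message notes.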
@@ -486,44 +486,6 @@ static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
         return wanted;
 }
 
-static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
-                                        size_t bytes, struct iov_iter *i)
-{
-        void *kaddr = kmap_atomic(page);
-        size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
-        kunmap_atomic(kaddr);
-        return wanted;
-}
-
-static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
-                                        size_t bytes, struct iov_iter *i)
-{
-        void *kaddr = kmap_atomic(page);
-        size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
-        kunmap_atomic(kaddr);
-        return wanted;
-}
-
-size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
-                         struct iov_iter *i)
-{
-        if (i->type & ITER_BVEC)
-                return copy_page_to_iter_bvec(page, offset, bytes, i);
-        else
-                return copy_page_to_iter_iovec(page, offset, bytes, i);
-}
-EXPORT_SYMBOL(copy_page_to_iter);
-
-size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
-                         struct iov_iter *i)
-{
-        if (i->type & ITER_BVEC)
-                return copy_page_from_iter_bvec(page, offset, bytes, i);
-        else
-                return copy_page_from_iter_iovec(page, offset, bytes, i);
-}
-EXPORT_SYMBOL(copy_page_from_iter);
-
 size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
         if (i->type & ITER_BVEC)
@@ -542,6 +504,32 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter);
 
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+                         struct iov_iter *i)
+{
+        if (i->type & (ITER_BVEC|ITER_KVEC)) {
+                void *kaddr = kmap_atomic(page);
+                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
+                kunmap_atomic(kaddr);
+                return wanted;
+        } else
+                return copy_page_to_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+                         struct iov_iter *i)
+{
+        if (i->type & ITER_BVEC) {
+                void *kaddr = kmap_atomic(page);
+                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+                kunmap_atomic(kaddr);
+                return wanted;
+        } else
+                return copy_page_from_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+
 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
         if (unlikely(bytes > i->count))