use clear_page()/copy_page() in favor of memset()/memcpy() on whole pages

After all, that's what they are intended for.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Jan Beulich on 2010-10-26 14:22:27 -07:00; committed by Linus Torvalds
parent b647277681
commit 3ecb01df32
5 changed files with 13 additions and 13 deletions
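
For context, below is a minimal sketch, not taken from this commit, of the pattern being replaced and the one introduced; the helper names are invented for illustration, and the kmap_atomic() calls use the 2.6.36-era API that still takes a KM_* slot argument, as in the hunks that follow. clear_page() and copy_page() act on exactly one page-aligned page, so architectures can provide optimized implementations rather than going through the generic memset()/memcpy() paths.

/* Illustrative only; these helper names are not from the kernel tree. */
#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
#include <linux/mm.h>		/* struct page, clear_page(), copy_page() */

static void zero_whole_page(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	/* was: memset(kaddr, 0, PAGE_SIZE); */
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static void copy_whole_page(struct page *dst, struct page *src)
{
	void *daddr = kmap_atomic(dst, KM_USER0);
	void *saddr = kmap_atomic(src, KM_USER1);

	/* was: memcpy(daddr, saddr, PAGE_SIZE); */
	copy_page(daddr, saddr);
	kunmap_atomic(saddr, KM_USER1);
	kunmap_atomic(daddr, KM_USER0);
}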

fs/fuse/dev.c

@@ -811,7 +811,7 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
 	if (page && zeroing && count < PAGE_SIZE) {
 		void *mapaddr = kmap_atomic(page, KM_USER1);
-		memset(mapaddr, 0, PAGE_SIZE);
+		clear_page(mapaddr);
 		kunmap_atomic(mapaddr, KM_USER1);
 	}
 	while (count) {

kernel/kexec.c

@@ -816,7 +816,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 		ptr = kmap(page);
 		/* Start with a clear page */
-		memset(ptr, 0, PAGE_SIZE);
+		clear_page(ptr);
 		ptr += maddr & ~PAGE_MASK;
 		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
 		if (mchunk > mbytes)

kernel/power/snapshot.c

@@ -993,7 +993,7 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 		 */
 		safe_copy_page(buffer, s_page);
 		dst = kmap_atomic(d_page, KM_USER0);
-		memcpy(dst, buffer, PAGE_SIZE);
+		copy_page(dst, buffer);
 		kunmap_atomic(dst, KM_USER0);
 	} else {
 		safe_copy_page(page_address(d_page), s_page);
@@ -1687,7 +1687,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
 		memory_bm_position_reset(&orig_bm);
 		memory_bm_position_reset(&copy_bm);
 	} else if (handle->cur <= nr_meta_pages) {
-		memset(buffer, 0, PAGE_SIZE);
+		clear_page(buffer);
 		pack_pfns(buffer, &orig_bm);
 	} else {
 		struct page *page;
@@ -1701,7 +1701,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
 			void *kaddr;
 			kaddr = kmap_atomic(page, KM_USER0);
-			memcpy(buffer, kaddr, PAGE_SIZE);
+			copy_page(buffer, kaddr);
 			kunmap_atomic(kaddr, KM_USER0);
 			handle->buffer = buffer;
 		} else {
@@ -1984,7 +1984,7 @@ static void copy_last_highmem_page(void)
 		void *dst;
 		dst = kmap_atomic(last_highmem_page, KM_USER0);
-		memcpy(dst, buffer, PAGE_SIZE);
+		copy_page(dst, buffer);
 		kunmap_atomic(dst, KM_USER0);
 		last_highmem_page = NULL;
 	}
@@ -2270,9 +2270,9 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 	kaddr1 = kmap_atomic(p1, KM_USER0);
 	kaddr2 = kmap_atomic(p2, KM_USER1);
-	memcpy(buf, kaddr1, PAGE_SIZE);
-	memcpy(kaddr1, kaddr2, PAGE_SIZE);
-	memcpy(kaddr2, buf, PAGE_SIZE);
+	copy_page(buf, kaddr1);
+	copy_page(kaddr1, kaddr2);
+	copy_page(kaddr2, buf);
 	kunmap_atomic(kaddr2, KM_USER1);
 	kunmap_atomic(kaddr1, KM_USER0);
 }

kernel/power/swap.c

@@ -251,7 +251,7 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
 	if (bio_chain) {
 		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
 		if (src) {
-			memcpy(src, buf, PAGE_SIZE);
+			copy_page(src, buf);
 		} else {
 			WARN_ON_ONCE(1);
 			bio_chain = NULL;	/* Go synchronous */
@@ -325,7 +325,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		error = write_page(handle->cur, handle->cur_swap, NULL);
 		if (error)
 			goto out;
-		memset(handle->cur, 0, PAGE_SIZE);
+		clear_page(handle->cur);
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
@@ -910,7 +910,7 @@ int swsusp_check(void)
 	hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
 	if (!IS_ERR(hib_resume_bdev)) {
 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
-		memset(swsusp_header, 0, PAGE_SIZE);
+		clear_page(swsusp_header);
 		error = hib_bio_read_page(swsusp_resume_block,
 					swsusp_header, NULL);
 		if (error)

mm/memory.c

@@ -2080,7 +2080,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 		 * zeroes.
 		 */
 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
-			memset(kaddr, 0, PAGE_SIZE);
+			clear_page(kaddr);
 		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(dst);
 	} else