f2fs: fix overflow of size calculation

We have a potential overflow issue when calculating the size of an
object: when we left-shift an index by PAGE_CACHE_SHIFT bits and the
index type has only 32 bits on a 32-bit architecture, the left shift
overflows, e.g.:

pgoff_t index = 0xFFFFFFFF;
loff_t size = index << PAGE_CACHE_SHIFT;
size: 0xFFFFF000

So we should cast the index to a 64-bit type to avoid this issue.
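
For illustration only (not part of this patch), here is a minimal user-space
sketch of the same arithmetic: uint32_t stands in for a 32-bit pgoff_t,
long long for loff_t, and an assumed shift of 12 for PAGE_CACHE_SHIFT
(4KB pages).

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* assumed 4KB page size */

int main(void)
{
	uint32_t index = 0xFFFFFFFF;	/* page index near the 32-bit limit */

	/* shift is evaluated in 32 bits; high bits are lost before widening */
	long long bad = index << DEMO_PAGE_SHIFT;

	/* cast first, so the shift is evaluated in 64 bits */
	long long good = (long long)index << DEMO_PAGE_SHIFT;

	printf("without cast: 0x%llx\n", (unsigned long long)bad);	/* 0xfffff000 */
	printf("with cast:    0x%llx\n", (unsigned long long)good);	/* 0xffffffff000 */
	return 0;
}

The patch below applies the same pattern at each call site: cast to loff_t
(or unsigned long long) before shifting, so the multiplication by the page
size is done in 64-bit arithmetic.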

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit 9edcdabf36 (parent 100136acfb)
Authored by Chao Yu on 2015-09-11 14:43:52 +08:00; committed by Jaegeuk Kim
5 changed files with 24 additions and 21 deletions

fs/f2fs/data.c

@@ -447,9 +447,9 @@ struct page *get_new_data_page(struct inode *inode,
 		lock_page(page);
 	}
 got_it:
-	if (new_i_size &&
-		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
-		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
+	if (new_i_size && i_size_read(inode) <
+			((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
+		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
 		/* Only the directory inode sets new_i_size */
 		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
 	}
@@ -489,8 +489,9 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	/* update i_size */
 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
 							dn->ofs_in_node;
-	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
-		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
+	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+		i_size_write(dn->inode,
+				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
 
 	/* direct IO doesn't use extent cache to maximize the performance */
 	f2fs_drop_largest_extent(dn->inode, fofs);

fs/f2fs/debug.c

@@ -198,9 +198,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 
 	si->page_mem = 0;
 	npages = NODE_MAPPING(sbi)->nrpages;
-	si->page_mem += npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
 	npages = META_MAPPING(sbi)->nrpages;
-	si->page_mem += npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
 }
 
 static int stat_show(struct seq_file *s, void *v)
@@ -333,13 +333,13 @@ static int stat_show(struct seq_file *s, void *v)
 
 		/* memory footprint */
 		update_mem_info(si->sbi);
-		seq_printf(s, "\nMemory: %u KB\n",
+		seq_printf(s, "\nMemory: %llu KB\n",
 			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
-		seq_printf(s, " - static: %u KB\n",
+		seq_printf(s, " - static: %llu KB\n",
 				si->base_mem >> 10);
-		seq_printf(s, " - cached: %u KB\n",
+		seq_printf(s, " - cached: %llu KB\n",
 				si->cache_mem >> 10);
-		seq_printf(s, " - paged : %u KB\n",
+		seq_printf(s, " - paged : %llu KB\n",
 				si->page_mem >> 10);
 	}
 	mutex_unlock(&f2fs_stat_mutex);

fs/f2fs/f2fs.h

@@ -1844,7 +1844,7 @@ struct f2fs_stat_info {
 	unsigned int segment_count[2];
 	unsigned int block_count[2];
 	unsigned int inplace_count;
-	unsigned base_mem, cache_mem, page_mem;
+	unsigned long long base_mem, cache_mem, page_mem;
 };
 
 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)

fs/f2fs/file.c

@@ -74,7 +74,8 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 		goto mapped;
 
 	/* page is wholly or partially inside EOF */
-	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
+	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+						i_size_read(inode)) {
 		unsigned offset;
 		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
 		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
@@ -343,7 +344,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
 
 	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 
-	for (; data_ofs < isize; data_ofs = pgofs << PAGE_CACHE_SHIFT) {
+	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
 		if (err && err != -ENOENT) {
@@ -802,8 +803,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 
 			f2fs_balance_fs(sbi);
 
-			blk_start = pg_start << PAGE_CACHE_SHIFT;
-			blk_end = pg_end << PAGE_CACHE_SHIFT;
+			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
+			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 							blk_end - 1);
 
@@ -994,7 +995,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			return ret;
 
 		new_size = max_t(loff_t, new_size,
-					pg_start << PAGE_CACHE_SHIFT);
+					(loff_t)pg_start << PAGE_CACHE_SHIFT);
 	}
 
 	for (index = pg_start; index < pg_end; index++) {
@@ -1030,7 +1031,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 		f2fs_unlock_op(sbi);
 
 		new_size = max_t(loff_t, new_size,
-				(index + 1) << PAGE_CACHE_SHIFT);
+				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
 	}
 
 	if (off_end) {
@@ -1192,9 +1193,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 		if (pg_start == pg_end)
 			new_size = offset + len;
 		else if (index == pg_start && off_start)
-			new_size = (index + 1) << PAGE_CACHE_SHIFT;
+			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
 		else if (index == pg_end)
-			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
+			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+							off_end;
 		else
 			new_size += PAGE_CACHE_SIZE;
 	}

fs/f2fs/recovery.c

@@ -570,7 +570,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 
 	/* truncate meta pages to be used by the recovery */
 	truncate_inode_pages_range(META_MAPPING(sbi),
-			MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
 
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));