btrfs: Prevent scrub recheck from racing with dev replace
scrub_setup_recheck_block() calls btrfs_map_sblock() and then accesses bbio without protection of bio_counter. This can lead to use-after-free if racing with dev replace cancel.

Fix it by increasing bio_counter before calling btrfs_map_sblock() and decreasing the bio_counter when the corresponding recover is finished.

Cc: Liu Bo <bo.li.liu@oracle.com>
Reported-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
ae6529c35b
commit
e501bfe323
|
@@ -860,9 +860,11 @@ static inline void scrub_get_recover(struct scrub_recover *recover)
|
|||
refcount_inc(&recover->refs);
|
||||
}
|
||||
|
||||
static inline void scrub_put_recover(struct scrub_recover *recover)
|
||||
static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
|
||||
struct scrub_recover *recover)
|
||||
{
|
||||
if (refcount_dec_and_test(&recover->refs)) {
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
btrfs_put_bbio(recover->bbio);
|
||||
kfree(recover);
|
||||
}
|
||||
|
@@ -1241,7 +1243,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
|
|||
sblock->pagev[page_index]->sblock = NULL;
|
||||
recover = sblock->pagev[page_index]->recover;
|
||||
if (recover) {
|
||||
scrub_put_recover(recover);
|
||||
scrub_put_recover(fs_info, recover);
|
||||
sblock->pagev[page_index]->recover =
|
||||
NULL;
|
||||
}
|
||||
|
@@ -1330,16 +1332,19 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
|
|||
* with a length of PAGE_SIZE, each returned stripe
|
||||
* represents one mirror
|
||||
*/
|
||||
btrfs_bio_counter_inc_blocked(fs_info);
|
||||
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
|
||||
logical, &mapped_length, &bbio);
|
||||
if (ret || !bbio || mapped_length < sublen) {
|
||||
btrfs_put_bbio(bbio);
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
|
||||
if (!recover) {
|
||||
btrfs_put_bbio(bbio);
|
||||
btrfs_bio_counter_dec(fs_info);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@@ -1365,7 +1370,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
|
|||
spin_lock(&sctx->stat_lock);
|
||||
sctx->stat.malloc_errors++;
|
||||
spin_unlock(&sctx->stat_lock);
|
||||
scrub_put_recover(recover);
|
||||
scrub_put_recover(fs_info, recover);
|
||||
return -ENOMEM;
|
||||
}
|
||||
scrub_page_get(page);
|
||||
|
@ -1407,7 +1412,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
|
|||
scrub_get_recover(recover);
|
||||
page->recover = recover;
|
||||
}
|
||||
scrub_put_recover(recover);
|
||||
scrub_put_recover(fs_info, recover);
|
||||
length -= sublen;
|
||||
logical += sublen;
|
||||
page_index++;
|
||||
|
|
Loading…
Reference in New Issue