block: remove the error_sector argument to blkdev_issue_flush
The argument isn't used by any caller, and drivers don't fill out
bi_sector for flush requests either.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 9398554fb3
parent 172ce41db4
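In effect, every call site simply drops the trailing NULL argument. A minimal
before/after sketch of the migration (the helper below is hypothetical, for
illustration only; only the blkdev_issue_flush() signature comes from this
commit):

    #include <linux/blkdev.h>

    /*
     * Hypothetical caller: flush a device's volatile write cache after
     * writing metadata. The old third argument was an error-sector
     * out-pointer that no caller consumed and no driver filled out, so
     * every user passed NULL.
     */
    static int example_flush_after_write(struct block_device *bdev)
    {
            /* before: return blkdev_issue_flush(bdev, GFP_KERNEL, NULL); */
            return blkdev_issue_flush(bdev, GFP_KERNEL);
    }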
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -432,15 +432,11 @@ void blk_insert_flush(struct request *rq)
  * blkdev_issue_flush - queue a flush
  * @bdev: blockdev to issue flush for
  * @gfp_mask: memory allocation flags (for bio_alloc)
- * @error_sector: error sector
  *
  * Description:
- *    Issue a flush for the block device in question. Caller can supply
- *    room for storing the error offset in case of a flush error, if they
- *    wish to.
+ *    Issue a flush for the block device in question.
  */
-int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
-		sector_t *error_sector)
+int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
 {
 	struct request_queue *q;
 	struct bio *bio;
@@ -458,15 +454,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	ret = submit_bio_wait(bio);
-
-	/*
-	 * The driver must store the error location in ->bi_sector, if
-	 * it supports it. For non-stacked drivers, this should be
-	 * copied from blk_rq_pos(rq).
-	 */
-	if (error_sector)
-		*error_sector = bio->bi_iter.bi_sector;
-
 	bio_put(bio);
 	return ret;
 }
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2657,7 +2657,7 @@ static void bitmap_flush_work(struct work_struct *work)
 
 	dm_integrity_flush_buffers(ic);
 	if (ic->meta_dev)
-		blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+		blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
 
 	limit = ic->provided_data_sectors;
 	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -661,7 +661,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
 
 	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
 	if (ret == 0)
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
 
 	return ret;
 }
@@ -703,7 +703,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 
 	/* Flush drive cache (this will also sync data) */
 	if (ret == 0)
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
 
 	return ret;
 }
@@ -772,7 +772,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 
 	/* If there are no dirty metadata blocks, just flush the device cache */
 	if (list_empty(&write_list)) {
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
 		goto err;
 	}
 
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
 	}
 
 	/* flush the disk cache after recovery if necessary */
-	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
+	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
 out:
 	__free_page(page);
 	return ret;
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -226,7 +226,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 
 u16 nvmet_bdev_flush(struct nvmet_req *req)
 {
-	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
 		return NVME_SC_INTERNAL | NVME_SC_DNR;
 	return 0;
 }
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -672,7 +672,7 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 	 * i_mutex and doing so causes performance issues with concurrent
 	 * O_SYNC writers to a block device.
 	 */
-	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
+	error = blkdev_issue_flush(bdev, GFP_KERNEL);
 	if (error == -EOPNOTSUPP)
 		error = 0;
 
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -176,7 +176,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
 
 	if (needs_barrier) {
-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 		if (!ret)
 			ret = err;
 	}
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1440,7 +1440,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 	if (ret < 0)
 		goto err_out;
 	if (barrier)
-		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);
+		blkdev_issue_flush(sb->s_bdev, GFP_NOFS);
 
 skip_zeroout:
 	ext4_lock_group(sb, group);
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5256,7 +5256,7 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 		needs_barrier = true;
 	if (needs_barrier) {
 		int err;
-		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
 		if (!ret)
 			ret = err;
 	}
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -195,7 +195,7 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 	if (err)
 		return err;
 
-	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 }
 
 
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -340,7 +340,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
 	}
 
 	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
-		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 
 	inode_unlock(inode);
 
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -239,7 +239,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	mutex_unlock(&sbi->vh_mutex);
 
 	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
-		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
 
 	return error;
 }
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -414,7 +414,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
 	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
 	 */
 	if (journal->j_flags & JBD2_BARRIER)
-		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
 
 	return __jbd2_update_log_tail(journal, first_tid, blocknr);
 }
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -775,7 +775,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	if (commit_transaction->t_need_data_flush &&
 	    (journal->j_fs_dev != journal->j_dev) &&
 	    (journal->j_flags & JBD2_BARRIER))
-		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
 
 	/* Done it all: now write the commit record asynchronously. */
 	if (jbd2_has_feature_async_commit(journal)) {
@@ -882,7 +882,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	stats.run.rs_blocks_logged++;
 	if (jbd2_has_feature_async_commit(journal) &&
 	    journal->j_flags & JBD2_BARRIER) {
-		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
+		blkdev_issue_flush(journal->j_dev, GFP_NOFS);
 	}
 
 	if (err)
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -286,7 +286,7 @@ int jbd2_journal_recover(journal_t *journal)
 		err = err2;
 	/* Make sure all replayed data is on permanent storage */
 	if (journal->j_flags & JBD2_BARRIER) {
-		err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+		err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL);
 		if (!err)
 			err = err2;
 	}
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1113,7 +1113,7 @@ int generic_file_fsync(struct file *file, loff_t start, loff_t end,
 	err = __generic_file_fsync(file, start, end, datasync);
 	if (err)
 		return err;
-	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 }
 EXPORT_SYMBOL(generic_file_fsync);
 
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -375,7 +375,7 @@ static inline int nilfs_flush_device(struct the_nilfs *nilfs)
 	 */
 	smp_wmb();
 
-	err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL);
+	err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL);
 	if (err != -EIO)
 		err = 0;
 	return err;
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -194,7 +194,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 		needs_barrier = true;
 	err = jbd2_complete_transaction(journal, commit_tid);
 	if (needs_barrier) {
-		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 		if (!err)
 			err = ret;
 	}
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -159,7 +159,7 @@ static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
 	barrier_done = reiserfs_commit_for_inode(inode);
 	reiserfs_write_unlock(inode->i_sb);
 	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
-		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 	inode_unlock(inode);
 	if (barrier_done < 0)
 		return barrier_done;
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -305,7 +305,7 @@ void
 xfs_blkdev_issue_flush(
 	xfs_buftarg_t		*buftarg)
 {
-	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
+	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
 }
 
 STATIC void
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -479,7 +479,7 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
 	if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
 		ret = file_write_and_wait_range(file, start, end);
 	if (!ret)
-		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
 
 	if (ret)
 		zonefs_io_error(inode, true);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1233,7 +1233,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 
 extern void blk_io_schedule(void);
 
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+int blkdev_issue_flush(struct block_device *, gfp_t);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 
@@ -1872,8 +1872,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 	return false;
 }
 
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
-		sector_t *error_sector)
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
 {
 	return 0;
 }
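With the out-parameter gone, callers learn about a flush failure only through
the return value. A short sketch of the error-handling pattern the fsync paths
above use (the wrapper is hypothetical; the -EOPNOTSUPP special case mirrors
the blkdev_fsync hunk in this diff):

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Hypothetical fsync-style wrapper, for illustration only. */
    static int example_fsync_flush(struct block_device *bdev)
    {
            int error = blkdev_issue_flush(bdev, GFP_KERNEL);

            /*
             * A device without a volatile write cache may not support
             * flushes; blkdev_fsync treats -EOPNOTSUPP as success.
             */
            if (error == -EOPNOTSUPP)
                    error = 0;
            return error;
    }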