md: provide generic support for handling unplug callbacks.

When an md device adds a request to a queue, it can call
mddev_check_plugged.  If this succeeds then we know that the md thread
will be woken up shortly, and ->plug_cnt will be non-zero until then,
so some processing can be delayed.
If it fails, then no unplug callback is expected and the make_request
function needs to do whatever is required to make the request happen.

Signed-off-by: NeilBrown <neilb@suse.de>
parent 482c083492
commit 97658cdd3a
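As a rough illustration of the calling convention described above, a personality's make_request path might use the helper as in the sketch below. my_make_request(), struct my_conf and its fields are placeholders invented for this example and are not part of the patch; only mddev_check_plugged() and md_wakeup_thread() come from the md code itself.

	/* Illustrative only: a cut-down personality context; real
	 * personalities (raid1, raid5, ...) have their own conf. */
	struct my_conf {
		spinlock_t	device_lock;
		struct bio_list	pending_bio_list;
	};

	static int my_make_request(mddev_t *mddev, struct bio *bio)
	{
		struct my_conf *conf = mddev->private;
		unsigned long flags;
		/* If this returns non-zero, an unplug callback is pending
		 * and will wake the md thread for us later. */
		int plugged = mddev_check_plugged(mddev);

		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, bio);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		if (!plugged)
			md_wakeup_thread(mddev->thread);
		return 0;
	}

The point of the check is that a successful call guarantees a later plugger_unplug() wakeup, so the explicit md_wakeup_thread() can be skipped while the plug is held and queued writes can accumulate.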
drivers/md/md.c

@@ -445,6 +445,61 @@ void md_flush_request(mddev_t *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
+/* Support for plugging.
+ * This mirrors the plugging support in request_queue, but does not
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
+ */
+struct md_plug_cb {
+	struct blk_plug_cb cb;
+	mddev_t *mddev;
+};
+
+static void plugger_unplug(struct blk_plug_cb *cb)
+{
+	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+		md_wakeup_thread(mdcb->mddev->thread);
+	kfree(mdcb);
+}
+
+/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
+{
+	struct blk_plug *plug = current->plug;
+	struct md_plug_cb *mdcb;
+
+	if (!plug)
+		return 0;
+
+	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+		if (mdcb->cb.callback == plugger_unplug &&
+		    mdcb->mddev == mddev) {
+			/* Already on the list, move to top */
+			if (mdcb != list_first_entry(&plug->cb_list,
+						     struct md_plug_cb,
+						     cb.list))
+				list_move(&mdcb->cb.list, &plug->cb_list);
+			return 1;
+		}
+	}
+	/* Not currently on the callback list */
+	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+	if (!mdcb)
+		return 0;
+
+	mdcb->mddev = mddev;
+	mdcb->cb.callback = plugger_unplug;
+	atomic_inc(&mddev->plug_cnt);
+	list_add(&mdcb->cb.list, &plug->cb_list);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
+
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -494,6 +549,7 @@ void mddev_init(mddev_t *mddev)
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
+	atomic_set(&mddev->plug_cnt, 0);
 	spin_lock_init(&mddev->write_lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
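For context, the callback registered above piggybacks on the block layer's per-task plugging: mddev_check_plugged() can only succeed while current->plug is set, and plugger_unplug() runs when that plug is flushed, for example at blk_finish_plug(). A minimal sketch of a submitting context, assuming bio is an already-prepared write bio:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* current->plug is now set */
	submit_bio(WRITE, bio);		/* md's make_request sees the plug and
					 * defers its wakeup via plug_cnt */
	blk_finish_plug(&plug);		/* plug is flushed: plugger_unplug() runs,
					 * plug_cnt drops, and the md thread is
					 * woken once it reaches zero */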
drivers/md/md.h

@@ -179,6 +179,9 @@ struct mddev_s
 	int				delta_disks, new_level, new_layout;
 	int				new_chunk_sectors;
 
+	atomic_t			plug_cnt;	/* If device is expecting
+							 * more bios soon.
+							 */
 	struct mdk_thread_s		*thread;	/* management thread */
 	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
 	sector_t			curr_resync;	/* last block scheduled */
@@ -508,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
 				   mddev_t *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 				   mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
 #endif /* _MD_MD_H */
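Finally, a sketch of the consumer side: an md personality's daemon thread can read plug_cnt to postpone work while an unplug wakeup is still pending. my_daemon() and my_flush_pending() are illustrative stand-ins (my_conf is the placeholder structure from the earlier sketch); only mddev->plug_cnt is introduced by this patch.

	static void my_daemon(mddev_t *mddev)
	{
		struct my_conf *conf = mddev->private;

		/* A non-zero plug_cnt means plugger_unplug() will run soon
		 * and wake this thread again, so the flush can wait. */
		if (atomic_read(&mddev->plug_cnt) == 0)
			my_flush_pending(conf);	/* stand-in for the
						 * personality's flush */

		/* ... remainder of the personality's housekeeping ... */
	}

This is what the commit message means by "some processing can be delayed": the daemon flushes only once the plug has drained, turning many per-bio wakeups into one batched pass.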