block-5.5-20191221
Merge tag 'block-5.5-20191221' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Let's try this one again, this time without the compat_ioctl changes.
  We've got those fixed up, but that can go out next week.

  This contains:

   - block queue flush lockdep annotation (Bart)
   - Type fix for bsg_queue_rq() (Bart)
   - Three dasd fixes (Stefan, Jan)
   - nbd deadlock fix (Mike)
   - Error handling bio user map fix (Yang)
   - iocost fix (Tejun)
   - sbitmap waitqueue addition fix that affects the kyber IO scheduler
     (David)"

* tag 'block-5.5-20191221' of git://git.kernel.dk/linux-block:
  sbitmap: only queue kyber's wait callback if not already active
  block: fix memleak when __blk_rq_map_user_iov() is failed
  s390/dasd: fix typo in copyright statement
  s390/dasd: fix memleak in path handling error case
  s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly
  block: Fix a lockdep complaint triggered by request queue flushing
  block: Fix the type of 'sts' in bsg_queue_rq()
  block: end bio with BLK_STS_AGAIN in case of non-mq devs and REQ_NOWAIT
  nbd: fix shutdown and recv work deadlock v2
  iocost: over-budget forced IOs should schedule async delay
commit 44579f35c2
block/blk-core.c

@@ -885,11 +885,14 @@ generic_make_request_checks(struct bio *bio)
 	}
 
 	/*
-	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
-	 * if queue is not a request based queue.
+	 * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
+	 * with BLK_STS_AGAIN status in order to catch -EAGAIN and
+	 * to give a chance to the caller to repeat request gracefully.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
-		goto not_supported;
+	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
+		status = BLK_STS_AGAIN;
+		goto end_io;
+	}
 
 	if (should_fail_bio(bio))
 		goto end_io;
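The effect of this hunk is visible to userspace as EAGAIN on a RWF_NOWAIT read or write against a non-mq queue, rather than EOPNOTSUPP. A minimal runnable sketch of the caller-side retry pattern (not code from the commit; the file path is arbitrary):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/etc/hostname", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	/* Ask the kernel not to block; EAGAIN now means "retry". */
	n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		n = preadv2(fd, &iov, 1, 0, 0);	/* retry, blocking allowed */
	if (n >= 0)
		printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}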
block/blk-flush.c

@@ -69,6 +69,7 @@
 #include <linux/blkdev.h>
 #include <linux/gfp.h>
 #include <linux/blk-mq.h>
+#include <linux/lockdep.h>
 
 #include "blk.h"
 #include "blk-mq.h"

@@ -505,6 +506,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	INIT_LIST_HEAD(&fq->flush_queue[1]);
 	INIT_LIST_HEAD(&fq->flush_data_in_flight);
 
+	lockdep_register_key(&fq->key);
+	lockdep_set_class(&fq->mq_flush_lock, &fq->key);
+
 	return fq;
 
  fail_rq:

@@ -519,6 +523,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
 	if (!fq)
 		return;
 
+	lockdep_unregister_key(&fq->key);
 	kfree(fq->flush_rq);
 	kfree(fq);
 }
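The annotation gives every flush queue its own dynamically registered lock class, so lockdep no longer conflates mq_flush_lock instances belonging to different queues (the source of the false-positive complaint). A minimal kernel-style sketch of the same dynamic-key pattern, using a hypothetical my_queue type:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_queue {
	spinlock_t lock;
	struct lock_class_key key;	/* one lockdep class per instance */
};

static struct my_queue *my_queue_alloc(gfp_t flags)
{
	struct my_queue *q = kzalloc(sizeof(*q), flags);

	if (!q)
		return NULL;
	spin_lock_init(&q->lock);
	/* Register a dynamic key and bind this instance's lock to it. */
	lockdep_register_key(&q->key);
	lockdep_set_class(&q->lock, &q->key);
	return q;
}

static void my_queue_free(struct my_queue *q)
{
	if (!q)
		return;
	/* The key must be unregistered before its storage is freed. */
	lockdep_unregister_key(&q->key);
	kfree(q);
}

Note the ordering in blk_free_flush_queue() above: lockdep_unregister_key() runs before kfree(), because the key lives inside the structure being freed.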
block/blk-iocost.c

@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);

@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	/* clear or maintain depending on the overage */
 	if (time_before_eq64(vtime, now->vnow)) {
 		blkcg_clear_delay(blkg);
-		return;
+		return false;
 	}
 	if (!atomic_read(&blkg->use_delay) &&
 	    time_before_eq64(vtime, now->vnow + vmargin))
-		return;
+		return false;
 
 	/* use delay */
 	if (cost) {

@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
 	if (hrtimer_is_queued(&iocg->delay_timer) &&
 	    abs(oexpires - expires) <= margin_ns / 4)
-		return;
+		return true;
 
 	hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
 			       margin_ns / 4, HRTIMER_MODE_ABS);
+	return true;
 }
 
 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)

@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 	 */
 	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
 		atomic64_add(abs_cost, &iocg->abs_vdebt);
-		iocg_kick_delay(iocg, &now, cost);
+		if (iocg_kick_delay(iocg, &now, cost))
+			blkcg_schedule_throttle(rqos->q,
+					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		return;
 	}
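The point of the signature change: iocg_kick_delay() now reports whether a delay is actually in force, so ioc_rqos_throttle() can schedule an async throttle for over-budget forced IOs instead of issuing them with no back-pressure. A compilable toy model of that contract (every name here is a hypothetical stand-in, not an iocost internal):

#include <stdbool.h>
#include <stdio.h>

struct iocg_stub { long debt; long budget; };

/* Mirrors the new bool contract: true means "a delay is armed,
 * the caller should schedule throttling for the issuer". */
static bool kick_delay(struct iocg_stub *g)
{
	if (g->debt <= g->budget)
		return false;	/* under budget: nothing armed */
	/* ...arm or extend a delay timer here... */
	return true;
}

int main(void)
{
	struct iocg_stub g = { .debt = 90, .budget = 100 };

	g.debt += 30;		/* a forced IO adds to the debt */
	if (kick_delay(&g))
		puts("over budget: schedule async throttle");
	return 0;
}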
block/blk-map.c

@@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	return 0;
 
 unmap_rq:
-	__blk_rq_unmap_user(bio);
+	blk_rq_unmap_user(bio);
 fail:
 	rq->bio = NULL;
 	return ret;
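The leak fixed here: on a partial mapping failure the error path called __blk_rq_unmap_user(), which only unmaps, while blk_rq_unmap_user() also frees the bio chain. With the fix, blk_rq_map_user_iov() cleans up fully after itself on failure; a sketch of the resulting caller-side pairing (issue_user_io is hypothetical and the execute step is elided):

static int issue_user_io(struct request_queue *q, struct request *rq,
			 struct iov_iter *iter, gfp_t gfp)
{
	int ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, iter, gfp);
	if (ret)
		return ret;	/* mapping failed; core already cleaned up */

	/* ...issue the request, e.g. via blk_execute_rq()... */

	return blk_rq_unmap_user(rq->bio);	/* unmap and free the bios */
}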
block/blk.h

@@ -30,6 +30,7 @@ struct blk_flush_queue {
 	 * at the same time
 	 */
 	struct request		*orig_rq;
+	struct lock_class_key	key;
 	spinlock_t		mq_flush_lock;
 };
 
block/bsg-lib.c

@@ -266,7 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request *req = bd->rq;
 	struct bsg_set *bset =
 		container_of(q->tag_set, struct bsg_set, tag_set);
-	int sts = BLK_STS_IOERR;
+	blk_status_t sts = BLK_STS_IOERR;
 	int ret;
 
 	blk_mq_start_request(req);
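blk_status_t is a sparse-checked __bitwise type, so holding BLK_STS_* values in a plain int silently defeats that checking and invites confusion with negative errnos. When an errno genuinely has to cross the boundary, the block core provides a converter; a short sketch (queue_rq_sketch is hypothetical):

#include <linux/blk_types.h>
#include <linux/blkdev.h>

static blk_status_t queue_rq_sketch(int err)
{
	blk_status_t sts = BLK_STS_OK;

	if (err)
		sts = errno_to_blk_status(err);	/* e.g. -ENOMEM -> BLK_STS_RESOURCE */
	return sts;
}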
drivers/block/nbd.c

@@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
 	mutex_unlock(&nbd->config_lock);
 	ret = wait_event_interruptible(config->recv_wq,
 				       atomic_read(&config->recv_threads) == 0);
-	if (ret) {
+	if (ret)
 		sock_shutdown(nbd);
-		flush_workqueue(nbd->recv_workq);
-	}
+	flush_workqueue(nbd->recv_workq);
+
 	mutex_lock(&nbd->config_lock);
 	nbd_bdev_reset(bdev);
 	/* user requested, ignore socket errors */
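The deadlock scenario: receive workers block in socket reads, and the old code flushed the workqueue only on the error path, so a later teardown could wait on work that never finishes. The fix shuts the sockets down first (unblocking the workers) and flushes unconditionally, before re-taking config_lock. A runnable userspace analogue of that ordering, with a pipe and a thread standing in for the socket and the work item (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int pipefd[2];

static void *recv_worker(void *arg)
{
	char buf[64];

	(void)arg;
	/* Blocks until the write end is closed (the "shutdown"). */
	while (read(pipefd[0], buf, sizeof(buf)) > 0)
		;
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (pipe(pipefd) != 0)
		return 1;
	pthread_create(&t, NULL, recv_worker, NULL);
	close(pipefd[1]);	/* 1: shutdown, unblocking the worker */
	pthread_join(t, NULL);	/* 2: then wait (the "flush"), always */
	puts("teardown completed without deadlock");
	return 0;
}

Joining first and shutting down second (the analogue of flushing before sock_shutdown()) would block forever in pthread_join().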
drivers/s390/block/dasd_eckd.c

@@ -1128,7 +1128,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-	int tpm, mdc;
+	unsigned int mdc;
+	int tpm;
 
 	if (dasd_nofcx)
 		return 0;

@@ -1142,7 +1143,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 		return 0;
 
 	mdc = ccw_device_get_mdc(device->cdev, 0);
-	if (mdc < 0) {
+	if (mdc == 0) {
 		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
 		return 0;
 	} else {

@@ -1153,12 +1154,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
 	struct dasd_eckd_private *private = device->private;
-	int mdc;
+	unsigned int mdc;
 	u32 fcx_max_data;
 
 	if (private->fcx_max_data) {
 		mdc = ccw_device_get_mdc(device->cdev, lpm);
-		if (mdc < 0) {
+		if (mdc == 0) {
 			dev_warn(&device->cdev->dev,
 				 "Detecting the maximum data size for zHPF "
 				 "requests failed (rc=%d) for a new path %x\n",

@@ -2073,7 +2074,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	dasd_free_block(device->block);
 	device->block = NULL;
 out_err1:
-	kfree(private->conf_data);
+	dasd_eckd_clear_conf_data(device);
 	kfree(device->private);
 	device->private = NULL;
 	return rc;

@@ -2082,7 +2083,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 static void dasd_eckd_uncheck_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
-	int i;
 
 	if (!private)
 		return;

@@ -2092,21 +2092,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
 	private->sneq = NULL;
 	private->vdsneq = NULL;
 	private->gneq = NULL;
-	private->conf_len = 0;
-	for (i = 0; i < 8; i++) {
-		kfree(device->path[i].conf_data);
-		if ((__u8 *)device->path[i].conf_data ==
-		    private->conf_data) {
-			private->conf_data = NULL;
-			private->conf_len = 0;
-		}
-		device->path[i].conf_data = NULL;
-		device->path[i].cssid = 0;
-		device->path[i].ssid = 0;
-		device->path[i].chpid = 0;
-	}
-	kfree(private->conf_data);
-	private->conf_data = NULL;
+	dasd_eckd_clear_conf_data(device);
 }
 
 static struct dasd_ccw_req *
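Both dasd hunks above funnel cleanup through dasd_eckd_clear_conf_data(). Assuming the helper mirrors the loop it replaces (a sketch, not the verbatim kernel function): private->conf_data aliases one of the per-path buffers, so each per-path buffer is freed exactly once and the aliasing pointer is only reset, which is what the removed open-coded version got wrong:

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	/* Only reset the alias; never kfree() it separately. */
	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
}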
drivers/s390/block/dasd_fba.h

@@ -2,7 +2,7 @@
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2000
+ * Copyright IBM Corp. 1999, 2000
  *
  */
 
drivers/s390/block/dasd_proc.c

@@ -5,7 +5,7 @@
  * Carsten Otte <Cotte@de.ibm.com>
  * Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2002
+ * Copyright IBM Corp. 1999, 2002
 *
 * /proc interface for the dasd driver.
 *
drivers/s390/cio/device_ops.c

@@ -635,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
  * @mask: mask of paths to use
  *
  * Return the number of 64K-bytes blocks all paths at least support
- * for a transport command. Return values <= 0 indicate failures.
+ * for a transport command. Return value 0 indicates failure.
  */
 int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
 {
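Under the clarified contract, ccw_device_get_mdc() never reports failure as a negative value, which is why the dasd hunks above keep the result in an unsigned variable and test it against 0. A hypothetical caller sketch (the function name and scaling constant are illustrative, not from the commit):

static u32 fcx_max_data_sketch(struct ccw_device *cdev)
{
	unsigned int mdc = ccw_device_get_mdc(cdev, 0);

	if (mdc == 0)
		return 0;	/* detection failed: disable the feature */
	return mdc * 0x10000;	/* scale 64K-byte blocks to bytes */
}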
lib/sbitmap.c

@@ -650,8 +650,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
 	if (!sbq_wait->sbq) {
 		sbq_wait->sbq = sbq;
 		atomic_inc(&sbq->ws_active);
+		add_wait_queue(&ws->wait, &sbq_wait->wait);
 	}
-	add_wait_queue(&ws->wait, &sbq_wait->wait);
 }
 EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
 
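The kyber bug: its domain token wait callback could be re-queued while already on a waitqueue, because sbitmap_add_wait_queue() called add_wait_queue() unconditionally. Adding the same wait entry to a list twice corrupts it and fires the callback repeatedly; the fix reuses sbq_wait->sbq as the "already queued" marker. The general guard pattern, sketched with a hypothetical waiter type:

#include <linux/wait.h>

struct waiter {
	bool queued;			/* doubles as "already on a waitqueue" */
	wait_queue_entry_t wait;
};

static void guarded_add(struct wait_queue_head *wq, struct waiter *w)
{
	if (!w->queued) {		/* never add the same entry twice */
		w->queued = true;
		add_wait_queue(wq, &w->wait);
	}
}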