for-5.6/block-2020-01-27
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl4vOqAQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppYBD/wLczY7hyjF2loc71MC9HloUq3BVbATktM3
OF6wRbyxbeiOj/7Px0lE0M67tQbnEoIP26gS03fd6e7HE19//gmzGuB3Z2R2CJ5q
XKkTamqz0pcPcX5FdDO5JFQZf27/1Qs3g7Nkr7FjVcR2XQ8PFv5B/FLMhse4frJI
k92Sj0V1OwdNtMXozKqno/7xPwL/kQKWoF6aFDgO27xLfsFmi8Wbgf/CslOOTHIN
vAUaz3Cue6V17M5y98wD4nwpjG7Ve+aY1i6oFPBE7Az9TA0xoiBA/tNPKW7iS10C
GEP1aoI6lpgkxAzvyR29K1ayjzV11hEIig3rNIWxNfmCSGaawttWXAPEi7jU5u2D
ZXbzUJxKnfeg8yrAj0CTcKLA9i4v1cZXPCUXqMO2+wHEWgmxq2IWuWjSl/V4fn3Y
zgTPBngDM4Gx3fAqvD8SVfCW7xwI4VRP+da58WCFOjwnOgYSouxS7RnCtm+yPUbk
Es6m2XBb+3ycaJPT58LcXPrnTJWZeRincs3MfFJeTXRn5T7IzlBjKdIvQiQSHQXo
caZzWHEJW827+wfQFNreXpk5KPi+D6boeziYe96UcII8L5qVw3N0X5hOpr6IRhkX
hn2CUb/CmY6bl8PJJPVc4ygqgiavyvynJu+A0uJvFSjvXX6jjXNEsSJ6bz8aBxdm
4rmgPFTlqA==
=yJZi
-----END PGP SIGNATURE-----

Merge tag 'for-5.6/block-2020-01-27' of git://git.kernel.dk/linux-block

Pull core block updates from Jens Axboe:
 "This may be the most quiet round we've had in years. I'm not
  complaining. Really not a lot to detail here, outside of spelling and
  documentation improvements/fixes, we have:

   - Allow t10-pi to be modular (Herbert)

   - Remove dead code in bfq (Alex)

   - Mark zone management requests with REQ_SYNC (Chaitanya)

   - BFQ division improvement (Wen)

   - Small series improving plugging (Pavel)"

* tag 'for-5.6/block-2020-01-27' of git://git.kernel.dk/linux-block:
  partitions/ldm: fix spelling mistake "to" -> "too"
  block, bfq: improve arithmetic division in bfq_delta()
  block/bfq: remove unused bfq_class_rt which never used
  block: mark zone-mgmt bios with REQ_SYNC
  blk-mq: Document functions for sending request
  block: Allow t10-pi to be modular
  blk-mq: optimise blk_mq_flush_plug_list()
  list: introduce list_for_each_continue()
  blk-mq: optimise rq sort function
commit 48b4b4ff1e
block/Kconfig

@@ -66,7 +66,6 @@ config BLK_DEV_BSGLIB
 
 config BLK_DEV_INTEGRITY
 	bool "Block layer data integrity support"
-	select CRC_T10DIF if BLK_DEV_INTEGRITY
 	---help---
 	Some storage devices allow extra information to be
 	stored/retrieved to help protect the data. The block layer
@@ -77,6 +76,11 @@ config BLK_DEV_INTEGRITY
 	T10/SCSI Data Integrity Field or the T13/ATA External Path
 	Protection. If in doubt, say N.
 
+config BLK_DEV_INTEGRITY_T10
+	tristate
+	depends on BLK_DEV_INTEGRITY
+	select CRC_T10DIF
+
 config BLK_DEV_ZONED
 	bool "Zoned block device support"
 	select MQ_IOSCHED_DEADLINE
block/Makefile

@@ -27,7 +27,8 @@ obj-$(CONFIG_IOSCHED_BFQ)	+= bfq.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
-obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)	+= t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)	+= blk-mq-pci.o
 obj-$(CONFIG_BLK_MQ_VIRTIO)	+= blk-mq-virtio.o
 obj-$(CONFIG_BLK_MQ_RDMA)	+= blk-mq-rdma.o
block/bfq-iosched.c

@@ -427,7 +427,6 @@ void bfq_schedule_dispatch(struct bfq_data *bfqd)
 }
 
 #define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
 #define bfq_sample_valid(samples)	((samples) > 80)
 
block/bfq-wf2q.c

@@ -277,10 +277,7 @@ struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
  */
 static u64 bfq_delta(unsigned long service, unsigned long weight)
 {
-	u64 d = (u64)service << WFQ_SERVICE_SHIFT;
-
-	do_div(d, weight);
-	return d;
+	return div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight);
 }
 
 /**
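For reference, do_div() divides a 64-bit value in place by a 32-bit divisor and hands back the remainder, while div64_ul() divides by an unsigned long (which may itself be 64-bit) and returns the quotient, so bfq_delta() collapses to a single expression with matching operand types. A minimal userspace sketch of the same arithmetic, not the kernel code; the WFQ_SERVICE_SHIFT value of 22 is assumed from bfq-iosched.h:

/* Userspace sketch of the bfq_delta() change; not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>

#define WFQ_SERVICE_SHIFT 22	/* assumed to match bfq-iosched.h */

/* Old shape: divide in place (as do_div() does), then return the quotient. */
static uint64_t bfq_delta_old(unsigned long service, unsigned long weight)
{
	uint64_t d = (uint64_t)service << WFQ_SERVICE_SHIFT;

	d /= weight;	/* do_div(d, weight) leaves the quotient in d */
	return d;
}

/* New shape: one expression, as div64_ul() makes possible. */
static uint64_t bfq_delta_new(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)bfq_delta_old(3, 7),
	       (unsigned long long)bfq_delta_new(3, 7));
	return 0;
}

Both return the same quotient; the point of the patch is that do_div() is a 64-by-32 helper while weight is an unsigned long, so div64_ul() is the better fit.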
block/blk-mq.c (152 lines changed)
@@ -641,6 +641,14 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+/**
+ * blk_mq_start_request - Start processing a request
+ * @rq: Pointer to request to be started
+ *
+ * Function used by device drivers to notify the block layer that a request
+ * is going to be processed now, so blk layer can do proper initializations
+ * such as starting the timeout timer.
+ */
 void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -1327,6 +1335,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	return (queued + errors) != 0;
 }
 
+/**
+ * __blk_mq_run_hw_queue - Run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ *
+ * Send pending requests to the hardware.
+ */
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	int srcu_idx;
@@ -1424,6 +1438,15 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	return next_cpu;
 }
 
+/**
+ * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: If we want to run the queue asynchronously.
+ * @msecs: Microseconds of delay to wait before running the queue.
+ *
+ * If !@async, try to run the queue now. Else, run the queue asynchronously and
+ * with a delay of @msecs.
+ */
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 					unsigned long msecs)
 {
@@ -1445,12 +1468,28 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 				    msecs_to_jiffies(msecs));
 }
 
+/**
+ * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
+ * @hctx: Pointer to the hardware queue to run.
+ * @msecs: Microseconds of delay to wait before running the queue.
+ *
+ * Run a hardware queue asynchronously with a delay of @msecs.
+ */
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
 
+/**
+ * blk_mq_run_hw_queue - Start to run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: If we want to run the queue asynchronously.
+ *
+ * Check if the request queue is not in a quiesced state and if there are
+ * pending requests to be sent. If this is true, run the queue to send requests
+ * to hardware.
+ */
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
 	int srcu_idx;
@@ -1474,6 +1513,11 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
+/**
+ * blk_mq_run_hw_queue - Run all hardware queues in a request queue.
+ * @q: Pointer to the request queue to run.
+ * @async: If we want to run the queue asynchronously.
+ */
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1625,7 +1669,11 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
-/*
+/**
+ * blk_mq_request_bypass_insert - Insert a request at dispatch list.
+ * @rq: Pointer to request to be inserted.
+ * @run_queue: If we should run the hardware queue after inserting the request.
+ *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
@@ -1668,28 +1716,20 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	struct request *rqa = container_of(a, struct request, queuelist);
 	struct request *rqb = container_of(b, struct request, queuelist);
 
-	if (rqa->mq_ctx < rqb->mq_ctx)
-		return -1;
-	else if (rqa->mq_ctx > rqb->mq_ctx)
-		return 1;
-	else if (rqa->mq_hctx < rqb->mq_hctx)
-		return -1;
-	else if (rqa->mq_hctx > rqb->mq_hctx)
-		return 1;
+	if (rqa->mq_ctx != rqb->mq_ctx)
+		return rqa->mq_ctx > rqb->mq_ctx;
+	if (rqa->mq_hctx != rqb->mq_hctx)
+		return rqa->mq_hctx > rqb->mq_hctx;
 
 	return blk_rq_pos(rqa) > blk_rq_pos(rqb);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
-	struct blk_mq_hw_ctx *this_hctx;
-	struct blk_mq_ctx *this_ctx;
-	struct request_queue *this_q;
-	struct request *rq;
 	LIST_HEAD(list);
-	LIST_HEAD(rq_list);
-	unsigned int depth;
 
+	if (list_empty(&plug->mq_list))
+		return;
 	list_splice_init(&plug->mq_list, &list);
 
 	if (plug->rq_count > 2 && plug->multiple_queues)
@@ -1697,42 +1737,27 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	plug->rq_count = 0;
 
-	this_q = NULL;
-	this_hctx = NULL;
-	this_ctx = NULL;
-	depth = 0;
+	do {
+		struct list_head rq_list;
+		struct request *rq, *head_rq = list_entry_rq(list.next);
+		struct list_head *pos = &head_rq->queuelist; /* skip first */
+		struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
+		struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
+		unsigned int depth = 1;
 
-	while (!list_empty(&list)) {
-		rq = list_entry_rq(list.next);
-		list_del_init(&rq->queuelist);
-		BUG_ON(!rq->q);
-		if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
-			if (this_hctx) {
-				trace_block_unplug(this_q, depth, !from_schedule);
-				blk_mq_sched_insert_requests(this_hctx, this_ctx,
-								&rq_list,
-								from_schedule);
-			}
-
-			this_q = rq->q;
-			this_ctx = rq->mq_ctx;
-			this_hctx = rq->mq_hctx;
-			depth = 0;
+		list_for_each_continue(pos, &list) {
+			rq = list_entry_rq(pos);
+			BUG_ON(!rq->q);
+			if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
+				break;
+			depth++;
 		}
 
-		depth++;
-		list_add_tail(&rq->queuelist, &rq_list);
-	}
-
-	/*
-	 * If 'this_hctx' is set, we know we have entries to complete
-	 * on 'rq_list'. Do those.
-	 */
-	if (this_hctx) {
-		trace_block_unplug(this_q, depth, !from_schedule);
+		list_cut_before(&rq_list, &list, pos);
+		trace_block_unplug(head_rq->q, depth, !from_schedule);
 		blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
 						from_schedule);
-	}
+	} while(!list_empty(&list));
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
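The shorter plug_rq_cmp() above works because list_sort() only tests whether the callback returns a value greater than zero, meaning "a sorts after b"; an exact -1/0/1 result is not required. A self-contained sketch of the old and new styles, using stand-in fields rather than the real struct request:

/* Userspace sketch: both comparator styles agree on list_sort()'s only
 * question, cmp(a, b) > 0 ("a sorts after b"). Fields are stand-ins. */
#include <stdio.h>

struct fake_rq {
	int ctx;	/* stand-in for rq->mq_ctx */
	int hctx;	/* stand-in for rq->mq_hctx */
	long pos;	/* stand-in for blk_rq_pos(rq) */
};

static int cmp_old(const struct fake_rq *a, const struct fake_rq *b)
{
	if (a->ctx < b->ctx)
		return -1;
	else if (a->ctx > b->ctx)
		return 1;
	else if (a->hctx < b->hctx)
		return -1;
	else if (a->hctx > b->hctx)
		return 1;
	return a->pos > b->pos;
}

static int cmp_new(const struct fake_rq *a, const struct fake_rq *b)
{
	if (a->ctx != b->ctx)
		return a->ctx > b->ctx;
	if (a->hctx != b->hctx)
		return a->hctx > b->hctx;
	return a->pos > b->pos;
}

int main(void)
{
	struct fake_rq a = { .ctx = 1, .hctx = 2, .pos = 100 };
	struct fake_rq b = { .ctx = 1, .hctx = 1, .pos = 200 };

	/* list_sort() only cares whether the result is > 0. */
	printf("old: %d, new: %d\n", cmp_old(&a, &b) > 0, cmp_new(&a, &b) > 0);
	return 0;
}

For any pair of requests the two versions agree on that greater-than-zero test, so the ordering produced by list_sort() is unchanged.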
@@ -1828,6 +1853,17 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
+/**
+ * blk_mq_try_issue_directly - Try to send a request directly to device driver.
+ * @hctx: Pointer of the associated hardware queue.
+ * @rq: Pointer to request to be sent.
+ * @cookie: Request queue cookie.
+ *
+ * If the device has enough resources to accept a new request now, send the
+ * request directly to device driver. Else, insert at hctx->dispatch queue, so
+ * we can try send it another time in the future. Requests inserted at this
+ * queue have higher priority.
+ */
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, blk_qc_t *cookie)
 {
@@ -1905,6 +1941,22 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	}
 }
 
+/**
+ * blk_mq_make_request - Create and send a request to block device.
+ * @q: Request queue pointer.
+ * @bio: Bio pointer.
+ *
+ * Builds up a request structure from @q and @bio and send to the device. The
+ * request may not be queued directly to hardware if:
+ * * This request can be merged with another one
+ * * We want to place request at plug queue for possible future merging
+ * * There is an IO scheduler active at this queue
+ *
+ * It will not queue the request if there is an error with the bio, or at the
+ * request creation.
+ *
+ * Returns: Request queue cookie.
+ */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
@@ -1950,7 +2002,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	plug = blk_mq_plug(q, bio);
 	if (unlikely(is_flush_fua)) {
-		/* bypass scheduler for flush rq */
+		/* Bypass scheduler for flush requests */
 		blk_insert_flush(rq);
 		blk_mq_run_hw_queue(data.hctx, true);
 	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
@@ -1978,6 +2030,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		blk_add_rq_to_plug(plug, rq);
 	} else if (q->elevator) {
+		/* Insert the request at the IO scheduler queue */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	} else if (plug && !blk_queue_nomerges(q)) {
 		/*
@@ -2004,8 +2057,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) ||
 			!data.hctx->dispatch_busy) {
+		/*
+		 * There is no scheduler and we can try to send directly
+		 * to the hardware.
+		 */
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
+		/* Default case. */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
 
block/blk-zoned.c

@@ -198,7 +198,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
 			break;
 		}
 
-		bio->bi_opf = op;
+		bio->bi_opf = op | REQ_SYNC;
 		bio->bi_iter.bi_sector = sector;
 		sector += zone_sectors;
 
block/partitions/ldm.c

@@ -1233,7 +1233,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
 	BUG_ON (!data || !frags);
 
 	if (size < 2 * VBLK_SIZE_HEAD) {
-		ldm_error("Value of size is to small.");
+		ldm_error("Value of size is too small.");
 		return false;
 	}
 
block/t10-pi.c

@@ -7,6 +7,7 @@
 #include <linux/t10-pi.h>
 #include <linux/blkdev.h>
 #include <linux/crc-t10dif.h>
+#include <linux/module.h>
 #include <net/checksum.h>
 
 typedef __be16 (csum_fn) (void *, unsigned int);
@@ -280,3 +281,5 @@ const struct blk_integrity_profile t10_pi_type3_ip = {
 	.complete_fn = t10_pi_type3_complete,
 };
 EXPORT_SYMBOL(t10_pi_type3_ip);
+
+MODULE_LICENSE("GPL");
drivers/nvme/host/Kconfig

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config NVME_CORE
 	tristate
+	select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
 
 config BLK_DEV_NVME
 	tristate "NVM Express block device"
drivers/scsi/Kconfig

@@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
 config BLK_DEV_SD
 	tristate "SCSI disk support"
 	depends on SCSI
+	select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
 	---help---
 	  If you want to use SCSI hard disks, Fibre Channel disks,
 	  Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
include/linux/list.h

@@ -538,6 +538,16 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_for_each(pos, head) \
 	for (pos = (head)->next; pos != (head); pos = pos->next)
 
+/**
+ * list_for_each_continue - continue iteration over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * Continue to iterate over a list, continuing after the current position.
+ */
+#define list_for_each_continue(pos, head) \
+	for (pos = pos->next; pos != (head); pos = pos->next)
+
 /**
  * list_for_each_prev - iterate over a list backwards
  * @pos: the &struct list_head to use as a loop cursor.
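As used by the blk_mq_flush_plug_list() rewrite above, the new macro resumes iteration from an existing cursor rather than from the list head. A minimal self-contained re-creation for illustration; the list_head type and list_add_tail() here are simplified stand-ins, not the kernel definitions:

/* Minimal userspace re-creation of list_for_each_continue(). */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/* The macro introduced by this merge: continue after the current @pos. */
#define list_for_each_continue(pos, head) \
	for (pos = pos->next; pos != (head); pos = pos->next)

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct list_head nodes[4];
	struct list_head *pos;
	int i, visited = 0;

	for (i = 0; i < 4; i++)
		list_add_tail(&nodes[i], &head);

	pos = &nodes[1];		/* cursor already on the second entry */
	list_for_each_continue(pos, &head)
		visited++;		/* sees nodes[2] and nodes[3] only */

	printf("visited %d entries after the cursor\n", visited);	/* 2 */
	return 0;
}

Starting from an existing cursor and stopping at the head is exactly the "skip first" pattern blk_mq_flush_plug_list() relies on when it groups consecutive requests that share an hctx/ctx pair.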