mirror of https://gitee.com/openkylin/linux.git

SCSI misc on 20210902

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:

 "This series consists of the usual driver updates (ufs, qla2xxx,
  target, smartpqi, lpfc, mpt3sas). The core change causing the most
  churn was replacing the command's request field with a macro,
  allowing us to offset map to it and remove the redundant field; the
  same was also done for the tag field. The most impactful change is
  the final removal of scsi_ioctl, which has been deprecated for over
  a decade."

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----
iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYTD/TiYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishdUkAQCjb3Ux
4K9438mMelHlzM4er1S1IJ0WNnvObaVMNO9LBwD+JUz+rHsrKvuEX9j3g3C3u6JH
hC3BUEW8f2LLnujWanQ=
=lC5o
-----END PGP SIGNATURE-----

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (293 commits)
  scsi: ufs: Fix ufshcd_request_sense_async() for Samsung KLUFG8RHDA-B2D1
  scsi: ufs: ufs-exynos: Fix static checker warning
  scsi: mpt3sas: Use the proper SCSI midlayer interfaces for PI
  scsi: lpfc: Use the proper SCSI midlayer interfaces for PI
  scsi: lpfc: Copyright updates for 14.0.0.1 patches
  scsi: lpfc: Update lpfc version to 14.0.0.1
  scsi: lpfc: Add bsg support for retrieving adapter cmf data
  scsi: lpfc: Add cmf_info sysfs entry
  scsi: lpfc: Add debugfs support for cm framework buffers
  scsi: lpfc: Add support for maintaining the cm statistics buffer
  scsi: lpfc: Add rx monitoring statistics
  scsi: lpfc: Add support for the CM framework
  scsi: lpfc: Add cmfsync WQE support
  scsi: lpfc: Add support for cm enablement buffer
  scsi: lpfc: Add cm statistics buffer support
  scsi: lpfc: Add EDC ELS support
  scsi: lpfc: Expand FPIN and RDF receive logging
  scsi: lpfc: Add MIB feature enablement support
  scsi: lpfc: Add SET_HOST_DATA mbox cmd to pass date/time info to firmware
  scsi: fc: Add EDC ELS definition
  ...
This commit is contained in commit a9c9a6f741.
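The request-field change called out in the pull message can be illustrated with a minimal sketch. It assumes the 5.15-era blk-mq convention that a driver's per-command data (here the scsi_cmnd) is allocated immediately behind its struct request; the helper names blk_mq_rq_from_pdu() and scsi_cmd_to_rq() match the upstream API, but the snippet is a simplified illustration, not the full headers:

	/*
	 * Sketch: blk-mq places the driver PDU (the scsi_cmnd) directly
	 * after the struct request it belongs to, so the back-pointer
	 * field can be replaced by pure pointer arithmetic.  The struct
	 * definitions come from the kernel headers and are elided here;
	 * the void-pointer arithmetic is the kernel's GNU C dialect.
	 */
	static inline struct request *blk_mq_rq_from_pdu(void *pdu)
	{
		return pdu - sizeof(struct request);	/* rq precedes its PDU */
	}

	static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
	{
		return blk_mq_rq_from_pdu(scmd);
	}

Call sites then use scsi_cmd_to_rq(scmd) (and its tag) instead of the removed scmd->request back-pointer, which is what generated the churn across the drivers in this series.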
Documentation/ABI/testing/sysfs-driver-ufs

@ -1298,3 +1298,239 @@ Description: This node is used to set or display whether UFS WriteBooster is

		(if the platform supports UFSHCD_CAP_CLK_SCALING). For a
		platform that doesn't support UFSHCD_CAP_CLK_SCALING, we can
		disable/enable WriteBooster through this sysfs node.

What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_version
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the HPB specification version.
		The full information about the descriptor can be found in the
		UFS HPB (Host Performance Booster) Extension specifications.
		Example: version 1.2.3 = 0123h

		The file is read only.

What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_control
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows an indication of the HPB control mode.
		00h: Host control mode
		01h: Device control mode

		The file is read only.

What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_region_size
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the bHPBRegionSize, from which the region
		size can be calculated as follows (in bytes):

		HPB Region size = 512B * 2^bHPBRegionSize

		The file is read only.

What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_number_lu
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the maximum number of HPB LUs supported by
		the device.
		00h: HPB is not supported by the device.
		01h ~ 20h: Maximum number of HPB LUs supported by the device

		The file is read only.

What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_subregion_size
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the bHPBSubRegionSize, from which the
		sub-region size can be calculated as follows (in bytes); it
		shall be a multiple of the logical block size:

		HPB Sub-Region size = 512B * 2^bHPBSubRegionSize

		bHPBSubRegionSize shall not exceed bHPBRegionSize.

		The file is read only.
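The two exponent fields above decode to byte sizes the same way. A hypothetical helper (an illustration, not part of the driver) that makes the formulas concrete:

	/*
	 * Decode bHPBRegionSize / bHPBSubRegionSize into bytes:
	 * size = 512B * 2^exponent.  For example, bHPBRegionSize = 0Ch
	 * gives 512 << 12 = 2 MiB regions.
	 */
	static inline unsigned long long hpb_unit_size_bytes(unsigned char exponent)
	{
		return 512ULL << exponent;
	}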
What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_max_active_regions
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the maximum number of active HPB regions
		that is supported by the device.

		The file is read only.

What:		/sys/class/scsi_device/*/device/unit_descriptor/hpb_lu_max_active_regions
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the maximum number of HPB regions assigned
		to the HPB logical unit.

		The file is read only.

What:		/sys/class/scsi_device/*/device/unit_descriptor/hpb_pinned_region_start_offset
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the start offset of the HPB pinned region.

		The file is read only.

What:		/sys/class/scsi_device/*/device/unit_descriptor/hpb_number_pinned_regions
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the number of HPB pinned regions assigned
		to the HPB logical unit.

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_stats/hit_cnt
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the number of reads that were changed to
		HPB reads.

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_stats/miss_cnt
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the number of reads that could not be
		changed to HPB reads.

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_stats/rb_noti_cnt
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the number of response UPIUs that have
		recommendations for activating sub-regions and/or
		inactivating regions.

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_stats/rb_active_cnt
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the number of active sub-regions recommended
		by response UPIUs.

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_stats/rb_inactive_cnt
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the number of inactive regions recommended
		by response UPIUs.

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_stats/map_req_cnt
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the number of read buffer commands for
		activating sub-regions recommended by response UPIUs.

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_params/requeue_timeout_ms
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the requeue timeout threshold for the write
		buffer command, in ms. The value can be changed by writing an
		integer to this entry.

What:		/sys/bus/platform/drivers/ufshcd/*/attributes/max_data_size_hpb_single_cmd
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the maximum HPB data size for using a single
		HPB command.

		===  ========
		00h  4KB
		01h  8KB
		02h  12KB
		...
		FFh  1024KB
		===  ========

		The file is read only.
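The table above decodes linearly: each step adds 4KB, so the value maps to (val + 1) * 4 KiB. A hypothetical decode helper (illustrative only, not driver code):

	/*
	 * max_data_size_hpb_single_cmd: 00h -> 4KB, 01h -> 8KB, ...,
	 * FFh -> 1024KB, i.e. (val + 1) * 4 KiB.
	 */
	static inline unsigned int hpb_single_cmd_max_bytes(unsigned char val)
	{
		return ((unsigned int)val + 1) * 4096;
	}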
What:		/sys/bus/platform/drivers/ufshcd/*/flags/hpb_enable
Date:		June 2021
Contact:	Daejun Park <daejun7.park@samsung.com>
Description:	This entry shows the status of HPB.

		==  ============================
		0   HPB is not enabled.
		1   HPB is enabled.
		==  ============================

		The file is read only.

What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/activation_thld
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	In host control mode, reads are the major source of activation
		trials. Once this threshold has been met, the region is added
		to the "to-be-activated" list. Since we reset the read counter
		upon write, this includes sending an rb command that updates
		the region's ppn as well.

What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/normalization_factor
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	In host control mode, we think of the regions as "buckets".
		Those buckets are being filled with reads, and emptied on
		write. We use entries_per_srgn - the number of blocks in a
		sub-region - as our bucket size. This applies because HPB 1.0
		only handles single-block reads. Once the bucket size is
		crossed, we trigger normalization work - not only to avoid
		overflow, but mainly because we want to keep those counters
		normalized, as we are using the read counts as a comparative
		score for making various decisions. The normalization divides
		(shifts right) the read counter by the normalization_factor.
		If, during consecutive normalizations, an active region has
		exhausted its reads, it is inactivated.
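To make the normalization policy above concrete, here is a schematic sketch (an illustration only, not the ufshpb driver's actual code): every region's read counter is shifted right by normalization_factor, and an active region whose counter reaches zero becomes an inactivation candidate.

	#include <stdbool.h>

	static void hpb_normalize_reads(unsigned int *region_reads, int nr_regions,
					unsigned int normalization_factor,
					bool *inactivate_candidate)
	{
		int i;

		for (i = 0; i < nr_regions; i++) {
			/* keep counters comparable and avoid overflow */
			region_reads[i] >>= normalization_factor;
			inactivate_candidate[i] = (region_reads[i] == 0);
		}
	}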
What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_enter
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	Region deactivation is often due to the fact that eviction
		took place: a region becomes active at the expense of another.
		This happens when the max-active-regions limit has been
		crossed. In host mode, eviction is considered an extreme
		measure. We want to verify that the entering region has
		enough reads, and that the exiting region has far fewer
		reads. eviction_thld_enter is the minimum number of reads
		that a region must have in order to be considered a candidate
		for evicting another region.

What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_exit
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	Same as above for the exiting region. A region is considered
		to be a candidate for eviction only if it has fewer reads
		than eviction_thld_exit.

What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_ms
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	In order not to hang on to "cold" regions, we inactivate a
		region that has seen no READ access for a predefined amount
		of time - read_timeout_ms. If read_timeout_ms has expired and
		the region is dirty, it is unlikely that we can make any use
		of HPB-reading it, so we inactivate it. Still, deactivation
		has its overhead, and we may still benefit from HPB-reading
		this region if it is clean - see read_timeout_expiries.

What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_expiries
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	If the region read timeout has expired but the region is
		clean, just re-wind its timer for another spin. Do that as
		long as it is clean and has not exhausted its
		read_timeout_expiries threshold.

What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/timeout_polling_interval_ms
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	The frequency with which the delayed worker that checks the
		read_timeouts is awakened.

What:		/sys/class/scsi_device/*/device/hpb_param_sysfs/inflight_map_req
Date:		February 2021
Contact:	Avri Altman <avri.altman@wdc.com>
Description:	In host control mode, the host is the originator of map
		requests. To avoid flooding the device with map requests, a
		simple throttling mechanism limits the number of inflight map
		requests.
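A minimal sketch of such an inflight limit (illustrative only; the driver's actual bookkeeping differs): a map request is issued only while the inflight count is below the configured bound, and the count drops again when the command completes.

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int hpb_inflight_map_req;	/* illustrative counter */

	static bool hpb_try_issue_map_req(int inflight_map_req_limit)
	{
		if (atomic_fetch_add(&hpb_inflight_map_req, 1) >= inflight_map_req_limit) {
			atomic_fetch_sub(&hpb_inflight_map_req, 1);	/* over the limit, back off */
			return false;	/* caller retries later */
		}
		return true;	/* issue the map request; decrement on completion */
	}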
block/Kconfig

@ -29,35 +29,15 @@ if BLOCK
 config BLK_RQ_ALLOC_TIME
 	bool
 
-config BLK_SCSI_REQUEST
-	bool
-
 config BLK_CGROUP_RWSTAT
 	bool
 
-config BLK_DEV_BSG
-	bool "Block layer SG support v4"
-	default y
-	select BLK_SCSI_REQUEST
-	help
-	  Saying Y here will enable generic SG (SCSI generic) v4 support
-	  for any block device.
-
-	  Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
-	  can handle complicated SCSI commands: tagged variable length cdbs
-	  with bidirectional data transfers and generic request/response
-	  protocols (e.g. Task Management Functions and SMP in Serial
-	  Attached SCSI).
-
-	  This option is required by recent UDEV versions to properly
-	  access device serial numbers, etc.
-
-	  If unsure, say Y.
+config BLK_DEV_BSG_COMMON
+	tristate
 
 config BLK_DEV_BSGLIB
 	bool "Block layer SG support v4 helper lib"
-	select BLK_DEV_BSG
-	select BLK_SCSI_REQUEST
+	select BLK_DEV_BSG_COMMON
 	help
 	  Subsystems will normally enable this if needed. Users will not
 	  normally need to manually enable this.
block/Makefile

@ -12,8 +12,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
 			disk-events.o
 
 obj-$(CONFIG_BOUNCE)		+= bounce.o
-obj-$(CONFIG_BLK_SCSI_REQUEST)	+= scsi_ioctl.o
-obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
+obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)	+= bsg-lib.o
 obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
 obj-$(CONFIG_BLK_CGROUP_RWSTAT)	+= blk-cgroup-rwstat.o
block/blk-mq.c

@ -3280,8 +3280,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	    set->map[HCTX_TYPE_POLL].nr_queues)
 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 
-	q->sg_reserved_size = INT_MAX;
-
 	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
block/bsg-lib.c

@ -6,6 +6,7 @@
  * Copyright (C) 2011 Red Hat, Inc.  All rights reserved.
  * Copyright (C) 2011 Mike Christie
  */
+#include <linux/bsg.h>
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/delay.h>

@ -19,36 +20,44 @@
 
 struct bsg_set {
 	struct blk_mq_tag_set tag_set;
+	struct bsg_device *bd;
 	bsg_job_fn *job_fn;
 	bsg_timeout_fn *timeout_fn;
 };
 
-static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+		fmode_t mode, unsigned int timeout)
 {
+	struct bsg_job *job;
+	struct request *rq;
+	struct bio *bio;
+	int ret;
+
 	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
 	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
 		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	return 0;
-}
 
-static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-		fmode_t mode)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-	int ret;
+	rq = blk_get_request(q, hdr->dout_xfer_len ?
+			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+	rq->timeout = timeout;
 
+	job = blk_mq_rq_to_pdu(rq);
 	job->request_len = hdr->request_len;
 	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
-	if (IS_ERR(job->request))
-		return PTR_ERR(job->request);
+	if (IS_ERR(job->request)) {
+		ret = PTR_ERR(job->request);
+		goto out_put_request;
+	}
 
 	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
 		job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
 		if (IS_ERR(job->bidi_rq)) {
 			ret = PTR_ERR(job->bidi_rq);
-			goto out;
+			goto out_free_job_request;
 		}
 
 		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,

@ -63,20 +72,20 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
 		job->bidi_bio = NULL;
 	}
 
-	return 0;
+	ret = 0;
+	if (hdr->dout_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+				hdr->dout_xfer_len, GFP_KERNEL);
+	} else if (hdr->din_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+				hdr->din_xfer_len, GFP_KERNEL);
+	}
 
-out_free_bidi_rq:
-	if (job->bidi_rq)
-		blk_put_request(job->bidi_rq);
-out:
-	kfree(job->request);
-	return ret;
-}
+	if (ret)
+		goto out_unmap_bidi_rq;
 
-static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-	int ret = 0;
+	bio = rq->bio;
+	blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
 
 	/*
 	 * The assignments below don't make much sense, but are kept for

@ -119,28 +128,20 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 		hdr->din_resid = 0;
 	}
 
+	blk_rq_unmap_user(bio);
+out_unmap_bidi_rq:
+	if (job->bidi_rq)
+		blk_rq_unmap_user(job->bidi_bio);
+out_free_bidi_rq:
+	if (job->bidi_rq)
+		blk_put_request(job->bidi_rq);
+out_free_job_request:
+	kfree(job->request);
+out_put_request:
+	blk_put_request(rq);
 	return ret;
 }
 
-static void bsg_transport_free_rq(struct request *rq)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-
-	if (job->bidi_rq) {
-		blk_rq_unmap_user(job->bidi_bio);
-		blk_put_request(job->bidi_rq);
-	}
-
-	kfree(job->request);
-}
-
-static const struct bsg_ops bsg_transport_ops = {
-	.check_proto		= bsg_transport_check_proto,
-	.fill_hdr		= bsg_transport_fill_hdr,
-	.complete_rq		= bsg_transport_complete_rq,
-	.free_rq		= bsg_transport_free_rq,
-};
-
 /**
  * bsg_teardown_job - routine to teardown a bsg job
  * @kref: kref inside bsg_job that is to be torn down

@ -327,7 +328,7 @@ void bsg_remove_queue(struct request_queue *q)
 		struct bsg_set *bset =
 			container_of(q->tag_set, struct bsg_set, tag_set);
 
-		bsg_unregister_queue(q);
+		bsg_unregister_queue(bset->bd);
 		blk_cleanup_queue(q);
 		blk_mq_free_tag_set(&bset->tag_set);
 		kfree(bset);

@ -396,10 +397,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
-	if (ret) {
-		printk(KERN_ERR "%s: bsg interface failed to "
-		       "initialize - register queue\n", dev->kobj.name);
+	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
+	if (IS_ERR(bset->bd)) {
+		ret = PTR_ERR(bset->bd);
 		goto out_cleanup_queue;
 	}
 
block/bsg.c
@ -15,343 +15,97 @@
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_driver.h>
 #include <scsi/sg.h>
 
 #define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
 #define BSG_VERSION	"0.4"
 
-#define bsg_dbg(bd, fmt, ...) \
-	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
-
 struct bsg_device {
 	struct request_queue *queue;
-	spinlock_t lock;
-	struct hlist_node dev_list;
-	refcount_t ref_count;
-	char name[20];
+	struct device device;
+	struct cdev cdev;
 	int max_queue;
+	unsigned int timeout;
+	unsigned int reserved_size;
+	bsg_sg_io_fn *sg_io_fn;
 };
 
+static inline struct bsg_device *to_bsg_device(struct inode *inode)
+{
+	return container_of(inode->i_cdev, struct bsg_device, cdev);
+}
+
 #define BSG_DEFAULT_CMDS	64
 #define BSG_MAX_DEVS		32768
 
-static DEFINE_MUTEX(bsg_mutex);
-static DEFINE_IDR(bsg_minor_idr);
-
-#define BSG_LIST_ARRAY_SIZE	8
-static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
-
+static DEFINE_IDA(bsg_minor_ida);
 static struct class *bsg_class;
 static int bsg_major;
 
-static inline struct hlist_head *bsg_dev_idx_hash(int index)
+static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
-	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
+	unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+	if (hdr->timeout)
+		timeout = msecs_to_jiffies(hdr->timeout);
+	else if (bd->timeout)
+		timeout = bd->timeout;
+
+	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
 }
 
 #define uptr64(val) ((void __user *)(uintptr_t)(val))
 
-static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
+static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 {
-	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
-	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
-		return -EINVAL;
-	return 0;
-}
-
-static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-		fmode_t mode)
-{
-	struct scsi_request *sreq = scsi_req(rq);
-
-	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
-		pr_warn_once("BIDI support in bsg has been removed.\n");
-		return -EOPNOTSUPP;
-	}
-
-	sreq->cmd_len = hdr->request_len;
-	if (sreq->cmd_len > BLK_MAX_CDB) {
-		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
-		if (!sreq->cmd)
-			return -ENOMEM;
-	}
-
-	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
-		return -EFAULT;
-	if (blk_verify_command(sreq->cmd, mode))
-		return -EPERM;
-	return 0;
-}
-
-static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
-	struct scsi_request *sreq = scsi_req(rq);
-	int ret = 0;
-
-	/*
-	 * fill in all the output members
-	 */
-	hdr->device_status = sreq->result & 0xff;
-	hdr->transport_status = host_byte(sreq->result);
-	hdr->driver_status = 0;
-	if (scsi_status_is_check_condition(sreq->result))
-		hdr->driver_status = DRIVER_SENSE;
-	hdr->info = 0;
-	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
-		hdr->info |= SG_INFO_CHECK;
-	hdr->response_len = 0;
-
-	if (sreq->sense_len && hdr->response) {
-		int len = min_t(unsigned int, hdr->max_response_len,
-				sreq->sense_len);
-
-		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
-			ret = -EFAULT;
-		else
-			hdr->response_len = len;
-	}
-
-	if (rq_data_dir(rq) == READ)
-		hdr->din_resid = sreq->resid_len;
-	else
-		hdr->dout_resid = sreq->resid_len;
-
-	return ret;
-}
-
-static void bsg_scsi_free_rq(struct request *rq)
-{
-	scsi_req_free_cmd(scsi_req(rq));
-}
-
-static const struct bsg_ops bsg_scsi_ops = {
-	.check_proto		= bsg_scsi_check_proto,
-	.fill_hdr		= bsg_scsi_fill_hdr,
-	.complete_rq		= bsg_scsi_complete_rq,
-	.free_rq		= bsg_scsi_free_rq,
-};
-
-static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
-{
-	struct request *rq;
-	struct bio *bio;
 	struct sg_io_v4 hdr;
 	int ret;
 
 	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 		return -EFAULT;
-
-	if (!q->bsg_dev.class_dev)
-		return -ENXIO;
-
 	if (hdr.guard != 'Q')
 		return -EINVAL;
-	ret = q->bsg_dev.ops->check_proto(&hdr);
-	if (ret)
-		return ret;
-
-	rq = blk_get_request(q, hdr.dout_xfer_len ?
-			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
-	if (ret) {
-		blk_put_request(rq);
-		return ret;
-	}
-
-	rq->timeout = msecs_to_jiffies(hdr.timeout);
-	if (!rq->timeout)
-		rq->timeout = q->sg_timeout;
-	if (!rq->timeout)
-		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
-		rq->timeout = BLK_MIN_SG_TIMEOUT;
-
-	if (hdr.dout_xfer_len) {
-		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
-				hdr.dout_xfer_len, GFP_KERNEL);
-	} else if (hdr.din_xfer_len) {
-		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
-				hdr.din_xfer_len, GFP_KERNEL);
-	}
-
-	if (ret)
-		goto out_free_rq;
-
-	bio = rq->bio;
-
-	blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
-	ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
-	blk_rq_unmap_user(bio);
-
-out_free_rq:
-	rq->q->bsg_dev.ops->free_rq(rq);
-	blk_put_request(rq);
+	ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr));
 	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
 		return -EFAULT;
 	return ret;
 }
 
-static struct bsg_device *bsg_alloc_device(void)
-{
-	struct bsg_device *bd;
-
-	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
-	if (unlikely(!bd))
-		return NULL;
-
-	spin_lock_init(&bd->lock);
-	bd->max_queue = BSG_DEFAULT_CMDS;
-	INIT_HLIST_NODE(&bd->dev_list);
-	return bd;
-}
-
-static int bsg_put_device(struct bsg_device *bd)
-{
-	struct request_queue *q = bd->queue;
-
-	mutex_lock(&bsg_mutex);
-
-	if (!refcount_dec_and_test(&bd->ref_count)) {
-		mutex_unlock(&bsg_mutex);
-		return 0;
-	}
-
-	hlist_del(&bd->dev_list);
-	mutex_unlock(&bsg_mutex);
-
-	bsg_dbg(bd, "tearing down\n");
-
-	/*
-	 * close can always block
-	 */
-	kfree(bd);
-	blk_put_queue(q);
-	return 0;
-}
-
-static struct bsg_device *bsg_add_device(struct inode *inode,
-		struct request_queue *rq,
-		struct file *file)
-{
-	struct bsg_device *bd;
-	unsigned char buf[32];
-
-	lockdep_assert_held(&bsg_mutex);
-
-	if (!blk_get_queue(rq))
-		return ERR_PTR(-ENXIO);
-
-	bd = bsg_alloc_device();
-	if (!bd) {
-		blk_put_queue(rq);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	bd->queue = rq;
-
-	refcount_set(&bd->ref_count, 1);
-	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
-
-	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
-	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
-		format_dev_t(buf, inode->i_rdev), bd->max_queue);
-
-	return bd;
-}
-
-static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
-{
-	struct bsg_device *bd;
-
-	lockdep_assert_held(&bsg_mutex);
-
-	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
-		if (bd->queue == q) {
-			refcount_inc(&bd->ref_count);
-			goto found;
-		}
-	}
-	bd = NULL;
-found:
-	return bd;
-}
-
-static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
-{
-	struct bsg_device *bd;
-	struct bsg_class_device *bcd;
-
-	/*
-	 * find the class device
-	 */
-	mutex_lock(&bsg_mutex);
-	bcd = idr_find(&bsg_minor_idr, iminor(inode));
-
-	if (!bcd) {
-		bd = ERR_PTR(-ENODEV);
-		goto out_unlock;
-	}
-
-	bd = __bsg_get_device(iminor(inode), bcd->queue);
-	if (!bd)
-		bd = bsg_add_device(inode, bcd->queue, file);
-
-out_unlock:
-	mutex_unlock(&bsg_mutex);
-	return bd;
-}
-
 static int bsg_open(struct inode *inode, struct file *file)
 {
-	struct bsg_device *bd;
-
-	bd = bsg_get_device(inode, file);
-
-	if (IS_ERR(bd))
-		return PTR_ERR(bd);
-
-	file->private_data = bd;
+	if (!blk_get_queue(to_bsg_device(inode)->queue))
+		return -ENXIO;
 	return 0;
 }
 
 static int bsg_release(struct inode *inode, struct file *file)
 {
-	struct bsg_device *bd = file->private_data;
-
-	file->private_data = NULL;
-	return bsg_put_device(bd);
+	blk_put_queue(to_bsg_device(inode)->queue);
+	return 0;
 }
 
 static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
 {
-	return put_user(bd->max_queue, uarg);
+	return put_user(READ_ONCE(bd->max_queue), uarg);
 }
 
 static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
 {
-	int queue;
+	int max_queue;
 
-	if (get_user(queue, uarg))
+	if (get_user(max_queue, uarg))
 		return -EFAULT;
-	if (queue < 1)
+	if (max_queue < 1)
 		return -EINVAL;
 
-	spin_lock_irq(&bd->lock);
-	bd->max_queue = queue;
-	spin_unlock_irq(&bd->lock);
+	WRITE_ONCE(bd->max_queue, max_queue);
 	return 0;
 }
 
 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct bsg_device *bd = file->private_data;
+	struct bsg_device *bd = to_bsg_device(file_inode(file));
+	struct request_queue *q = bd->queue;
 	void __user *uarg = (void __user *) arg;
+	int __user *intp = uarg;
+	int val;
 
 	switch (cmd) {
 	/*

@ -366,17 +120,37 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	 * SCSI/sg ioctls
 	 */
 	case SG_GET_VERSION_NUM:
+		return put_user(30527, intp);
 	case SCSI_IOCTL_GET_IDLUN:
+		return put_user(0, intp);
 	case SCSI_IOCTL_GET_BUS_NUMBER:
+		return put_user(0, intp);
 	case SG_SET_TIMEOUT:
+		if (get_user(val, intp))
+			return -EFAULT;
+		bd->timeout = clock_t_to_jiffies(val);
+		return 0;
 	case SG_GET_TIMEOUT:
+		return jiffies_to_clock_t(bd->timeout);
 	case SG_GET_RESERVED_SIZE:
+		return put_user(min(bd->reserved_size, queue_max_bytes(q)),
+				intp);
 	case SG_SET_RESERVED_SIZE:
+		if (get_user(val, intp))
+			return -EFAULT;
+		if (val < 0)
+			return -EINVAL;
+		bd->reserved_size =
+			min_t(unsigned int, val, queue_max_bytes(q));
+		return 0;
 	case SG_EMULATED_HOST:
-	case SCSI_IOCTL_SEND_COMMAND:
-		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
+		return put_user(1, intp);
 	case SG_IO:
-		return bsg_sg_io(bd->queue, file->f_mode, uarg);
+		return bsg_sg_io(bd, file->f_mode, uarg);
+	case SCSI_IOCTL_SEND_COMMAND:
+		pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
+				current->comm);
+		return -EINVAL;
 	default:
 		return -ENOTTY;
 	}
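Since SCSI_IOCTL_SEND_COMMAND now fails with -EINVAL above, the supported replacement is SG_IO with the v4 header. A userspace sketch under stated assumptions (the device path is illustrative; the ioctl, header fields, and constants are the stock <linux/bsg.h>/<scsi/sg.h> ABI, and real code would also inspect hdr.device_status and the sense data on return):

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>		/* SG_IO */
	#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_SCSI */

	/* Send TEST UNIT READY via the v4 interface, e.g. on /dev/bsg/0:0:0:0. */
	static int send_test_unit_ready(const char *bsg_path)
	{
		unsigned char cdb[6] = { 0 };	/* opcode 0x00 = TEST UNIT READY */
		unsigned char sense[32] = { 0 };
		struct sg_io_v4 hdr;
		int fd, ret;

		fd = open(bsg_path, O_RDWR);
		if (fd < 0)
			return -1;

		memset(&hdr, 0, sizeof(hdr));
		hdr.guard = 'Q';		/* identifies a v4 header */
		hdr.protocol = BSG_PROTOCOL_SCSI;
		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
		hdr.request = (uintptr_t)cdb;
		hdr.request_len = sizeof(cdb);
		hdr.response = (uintptr_t)sense;
		hdr.max_response_len = sizeof(sense);
		hdr.timeout = 5000;		/* milliseconds */

		ret = ioctl(fd, SG_IO, &hdr);
		close(fd);
		return ret;
	}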
@ -391,92 +165,65 @@ static const struct file_operations bsg_fops = {
 	.llseek		= default_llseek,
 };
 
-void bsg_unregister_queue(struct request_queue *q)
+void bsg_unregister_queue(struct bsg_device *bd)
 {
-	struct bsg_class_device *bcd = &q->bsg_dev;
-
-	if (!bcd->class_dev)
-		return;
-
-	mutex_lock(&bsg_mutex);
-	idr_remove(&bsg_minor_idr, bcd->minor);
-	if (q->kobj.sd)
-		sysfs_remove_link(&q->kobj, "bsg");
-	device_unregister(bcd->class_dev);
-	bcd->class_dev = NULL;
-	mutex_unlock(&bsg_mutex);
+	if (bd->queue->kobj.sd)
+		sysfs_remove_link(&bd->queue->kobj, "bsg");
+	cdev_device_del(&bd->cdev, &bd->device);
+	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+	kfree(bd);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
-int bsg_register_queue(struct request_queue *q, struct device *parent,
-		const char *name, const struct bsg_ops *ops)
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+		struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
 {
-	struct bsg_class_device *bcd;
-	dev_t dev;
+	struct bsg_device *bd;
 	int ret;
-	struct device *class_dev = NULL;
 
-	/*
-	 * we need a proper transport to send commands, not a stacked device
-	 */
-	if (!queue_is_mq(q))
-		return 0;
+	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+	if (!bd)
+		return ERR_PTR(-ENOMEM);
+	bd->max_queue = BSG_DEFAULT_CMDS;
+	bd->reserved_size = INT_MAX;
+	bd->queue = q;
+	bd->sg_io_fn = sg_io_fn;
 
-	bcd = &q->bsg_dev;
-	memset(bcd, 0, sizeof(*bcd));
-
-	mutex_lock(&bsg_mutex);
-
-	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+	ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
 	if (ret < 0) {
-		if (ret == -ENOSPC) {
-			printk(KERN_ERR "bsg: too many bsg devices\n");
-			ret = -EINVAL;
-		}
-		goto unlock;
+		if (ret == -ENOSPC)
+			dev_err(parent, "bsg: too many bsg devices\n");
+		goto out_kfree;
 	}
+	bd->device.devt = MKDEV(bsg_major, ret);
+	bd->device.class = bsg_class;
+	bd->device.parent = parent;
+	dev_set_name(&bd->device, "%s", name);
+	device_initialize(&bd->device);
 
-	bcd->minor = ret;
-	bcd->queue = q;
-	bcd->ops = ops;
-	dev = MKDEV(bsg_major, bcd->minor);
-	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
-	if (IS_ERR(class_dev)) {
-		ret = PTR_ERR(class_dev);
-		goto idr_remove;
-	}
-	bcd->class_dev = class_dev;
+	cdev_init(&bd->cdev, &bsg_fops);
+	bd->cdev.owner = THIS_MODULE;
+	ret = cdev_device_add(&bd->cdev, &bd->device);
+	if (ret)
+		goto out_ida_remove;
 
 	if (q->kobj.sd) {
-		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+		ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
 		if (ret)
-			goto unregister_class_dev;
+			goto out_device_del;
 	}
 
-	mutex_unlock(&bsg_mutex);
-	return 0;
+	return bd;
 
-unregister_class_dev:
-	device_unregister(class_dev);
-idr_remove:
-	idr_remove(&bsg_minor_idr, bcd->minor);
-unlock:
-	mutex_unlock(&bsg_mutex);
-	return ret;
+out_device_del:
+	cdev_device_del(&bd->cdev, &bd->device);
+out_ida_remove:
+	ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+out_kfree:
+	kfree(bd);
+	return ERR_PTR(ret);
 }
-
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
-{
-	if (!blk_queue_scsi_passthrough(q)) {
-		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
-		return -EINVAL;
-	}
-
-	return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
-}
-EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);
-
-static struct cdev bsg_cdev;
+EXPORT_SYMBOL_GPL(bsg_register_queue);
 
 static char *bsg_devnode(struct device *dev, umode_t *mode)
 {

@ -485,11 +232,8 @@ static char *bsg_devnode(struct device *dev, umode_t *mode)
 
 static int __init bsg_init(void)
 {
-	int ret, i;
+	int ret;
 	dev_t devid;
 
-	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
-		INIT_HLIST_HEAD(&bsg_device_list[i]);
-
 	bsg_class = class_create(THIS_MODULE, "bsg");
 	if (IS_ERR(bsg_class))

@ -499,19 +243,12 @@ static int __init bsg_init(void)
 	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
 	if (ret)
 		goto destroy_bsg_class;
-
 	bsg_major = MAJOR(devid);
 
-	cdev_init(&bsg_cdev, &bsg_fops);
-	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
-	if (ret)
-		goto unregister_chrdev;
-
 	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
 	       " loaded (major %d)\n", bsg_major);
 	return 0;
-unregister_chrdev:
-	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+
 destroy_bsg_class:
 	class_destroy(bsg_class);
 	return ret;
@ -1,890 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
|
||||
*/
|
||||
#include <linux/compat.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/cdrom.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/times.h>
|
||||
#include <linux/uio.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_ioctl.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
#include <scsi/sg.h>
|
||||
|
||||
struct blk_cmd_filter {
|
||||
unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
|
||||
unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
|
||||
};
|
||||
|
||||
static struct blk_cmd_filter blk_default_cmd_filter;
|
||||
|
||||
/* Command group 3 is reserved and should never be used. */
|
||||
const unsigned char scsi_command_size_tbl[8] =
|
||||
{
|
||||
6, 10, 10, 12,
|
||||
16, 12, 10, 10
|
||||
};
|
||||
EXPORT_SYMBOL(scsi_command_size_tbl);
|
||||
|
||||
static int sg_get_version(int __user *p)
|
||||
{
|
||||
static const int sg_version_num = 30527;
|
||||
return put_user(sg_version_num, p);
|
||||
}
|
||||
|
||||
static int scsi_get_idlun(struct request_queue *q, int __user *p)
|
||||
{
|
||||
return put_user(0, p);
|
||||
}
|
||||
|
||||
static int scsi_get_bus(struct request_queue *q, int __user *p)
|
||||
{
|
||||
return put_user(0, p);
|
||||
}
|
||||
|
||||
static int sg_get_timeout(struct request_queue *q)
|
||||
{
|
||||
return jiffies_to_clock_t(q->sg_timeout);
|
||||
}
|
||||
|
||||
static int sg_set_timeout(struct request_queue *q, int __user *p)
|
||||
{
|
||||
int timeout, err = get_user(timeout, p);
|
||||
|
||||
if (!err)
|
||||
q->sg_timeout = clock_t_to_jiffies(timeout);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int max_sectors_bytes(struct request_queue *q)
|
||||
{
|
||||
unsigned int max_sectors = queue_max_sectors(q);
|
||||
|
||||
max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
|
||||
|
||||
return max_sectors << 9;
|
||||
}
|
||||
|
||||
static int sg_get_reserved_size(struct request_queue *q, int __user *p)
|
||||
{
|
||||
int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q));
|
||||
|
||||
return put_user(val, p);
|
||||
}
|
||||
|
||||
static int sg_set_reserved_size(struct request_queue *q, int __user *p)
|
||||
{
|
||||
int size, err = get_user(size, p);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (size < 0)
|
||||
return -EINVAL;
|
||||
|
||||
q->sg_reserved_size = min(size, max_sectors_bytes(q));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* will always return that we are ATAPI even for a real SCSI drive, I'm not
|
||||
* so sure this is worth doing anything about (why would you care??)
|
||||
*/
|
||||
static int sg_emulated_host(struct request_queue *q, int __user *p)
|
||||
{
|
||||
return put_user(1, p);
|
||||
}
|
||||
|
||||
static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
|
||||
{
|
||||
/* Basic read-only commands */
|
||||
__set_bit(TEST_UNIT_READY, filter->read_ok);
|
||||
__set_bit(REQUEST_SENSE, filter->read_ok);
|
||||
__set_bit(READ_6, filter->read_ok);
|
||||
__set_bit(READ_10, filter->read_ok);
|
||||
__set_bit(READ_12, filter->read_ok);
|
||||
__set_bit(READ_16, filter->read_ok);
|
||||
__set_bit(READ_BUFFER, filter->read_ok);
|
||||
__set_bit(READ_DEFECT_DATA, filter->read_ok);
|
||||
__set_bit(READ_CAPACITY, filter->read_ok);
|
||||
__set_bit(READ_LONG, filter->read_ok);
|
||||
__set_bit(INQUIRY, filter->read_ok);
|
||||
__set_bit(MODE_SENSE, filter->read_ok);
|
||||
__set_bit(MODE_SENSE_10, filter->read_ok);
|
||||
__set_bit(LOG_SENSE, filter->read_ok);
|
||||
__set_bit(START_STOP, filter->read_ok);
|
||||
__set_bit(GPCMD_VERIFY_10, filter->read_ok);
|
||||
__set_bit(VERIFY_16, filter->read_ok);
|
||||
__set_bit(REPORT_LUNS, filter->read_ok);
|
||||
__set_bit(SERVICE_ACTION_IN_16, filter->read_ok);
|
||||
__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
|
||||
__set_bit(MAINTENANCE_IN, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
|
||||
|
||||
/* Audio CD commands */
|
||||
__set_bit(GPCMD_PLAY_CD, filter->read_ok);
|
||||
__set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
|
||||
__set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
|
||||
__set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
|
||||
__set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
|
||||
|
||||
/* CD/DVD data reading */
|
||||
__set_bit(GPCMD_READ_CD, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_HEADER, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
|
||||
__set_bit(GPCMD_REPORT_KEY, filter->read_ok);
|
||||
__set_bit(GPCMD_SCAN, filter->read_ok);
|
||||
__set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
|
||||
__set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
|
||||
__set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
|
||||
__set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
|
||||
__set_bit(GPCMD_SEEK, filter->read_ok);
|
||||
__set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
|
||||
|
||||
/* Basic writing commands */
|
||||
__set_bit(WRITE_6, filter->write_ok);
|
||||
__set_bit(WRITE_10, filter->write_ok);
|
||||
__set_bit(WRITE_VERIFY, filter->write_ok);
|
||||
__set_bit(WRITE_12, filter->write_ok);
|
||||
__set_bit(WRITE_VERIFY_12, filter->write_ok);
|
||||
__set_bit(WRITE_16, filter->write_ok);
|
||||
__set_bit(WRITE_LONG, filter->write_ok);
|
||||
__set_bit(WRITE_LONG_2, filter->write_ok);
|
||||
__set_bit(WRITE_SAME, filter->write_ok);
|
||||
__set_bit(WRITE_SAME_16, filter->write_ok);
|
||||
__set_bit(WRITE_SAME_32, filter->write_ok);
|
||||
__set_bit(ERASE, filter->write_ok);
|
||||
__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
|
||||
__set_bit(MODE_SELECT, filter->write_ok);
|
||||
__set_bit(LOG_SELECT, filter->write_ok);
|
||||
__set_bit(GPCMD_BLANK, filter->write_ok);
|
||||
__set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
|
||||
__set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
|
||||
__set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
|
||||
__set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
|
||||
__set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
|
||||
__set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
|
||||
__set_bit(GPCMD_SEND_EVENT, filter->write_ok);
|
||||
__set_bit(GPCMD_SEND_KEY, filter->write_ok);
|
||||
__set_bit(GPCMD_SEND_OPC, filter->write_ok);
|
||||
__set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
|
||||
__set_bit(GPCMD_SET_SPEED, filter->write_ok);
|
||||
__set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
|
||||
__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
|
||||
__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
|
||||
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
|
||||
|
||||
/* ZBC Commands */
|
||||
__set_bit(ZBC_OUT, filter->write_ok);
|
||||
__set_bit(ZBC_IN, filter->read_ok);
|
||||
}
|
||||
|
||||
int blk_verify_command(unsigned char *cmd, fmode_t mode)
|
||||
{
|
||||
struct blk_cmd_filter *filter = &blk_default_cmd_filter;
|
||||
|
||||
/* root can do any command. */
|
||||
if (capable(CAP_SYS_RAWIO))
|
||||
return 0;
|
||||
|
||||
/* Anybody who can open the device can do a read-safe command */
|
||||
if (test_bit(cmd[0], filter->read_ok))
|
||||
return 0;
|
||||
|
||||
/* Write-safe commands require a writable open */
|
||||
if (test_bit(cmd[0], filter->write_ok) && (mode & FMODE_WRITE))
|
||||
return 0;
|
||||
|
||||
return -EPERM;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_verify_command);
|
||||
|
||||
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
|
||||
struct sg_io_hdr *hdr, fmode_t mode)
|
||||
{
|
||||
struct scsi_request *req = scsi_req(rq);
|
||||
|
||||
if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
|
||||
return -EFAULT;
|
||||
if (blk_verify_command(req->cmd, mode))
|
||||
return -EPERM;
|
||||
|
||||
/*
|
||||
* fill in request structure
|
||||
*/
|
||||
req->cmd_len = hdr->cmd_len;
|
||||
|
||||
rq->timeout = msecs_to_jiffies(hdr->timeout);
|
||||
if (!rq->timeout)
|
||||
rq->timeout = q->sg_timeout;
|
||||
if (!rq->timeout)
|
||||
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
|
||||
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
|
||||
rq->timeout = BLK_MIN_SG_TIMEOUT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
|
||||
struct bio *bio)
|
||||
{
|
||||
struct scsi_request *req = scsi_req(rq);
|
||||
int r, ret = 0;
|
||||
|
||||
/*
|
||||
* fill in all the output members
|
||||
*/
|
||||
hdr->status = req->result & 0xff;
|
||||
hdr->masked_status = status_byte(req->result);
|
||||
hdr->msg_status = COMMAND_COMPLETE;
|
||||
hdr->host_status = host_byte(req->result);
|
||||
hdr->driver_status = 0;
|
||||
if (scsi_status_is_check_condition(hdr->status))
|
||||
hdr->driver_status = DRIVER_SENSE;
|
||||
hdr->info = 0;
|
||||
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
|
||||
hdr->info |= SG_INFO_CHECK;
|
||||
hdr->resid = req->resid_len;
|
||||
hdr->sb_len_wr = 0;
|
||||
|
||||
if (req->sense_len && hdr->sbp) {
|
||||
int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
|
||||
|
||||
if (!copy_to_user(hdr->sbp, req->sense, len))
|
||||
hdr->sb_len_wr = len;
|
||||
else
|
||||
ret = -EFAULT;
|
||||
}
|
||||
|
||||
r = blk_rq_unmap_user(bio);
|
||||
if (!ret)
|
||||
ret = r;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
|
||||
struct sg_io_hdr *hdr, fmode_t mode)
|
||||
{
|
||||
unsigned long start_time;
|
||||
ssize_t ret = 0;
|
||||
int writing = 0;
|
||||
int at_head = 0;
|
||||
struct request *rq;
|
||||
struct scsi_request *req;
|
||||
struct bio *bio;
|
||||
|
||||
if (hdr->interface_id != 'S')
|
||||
return -EINVAL;
|
||||
|
||||
if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
|
||||
return -EIO;
|
||||
|
||||
if (hdr->dxfer_len)
|
||||
switch (hdr->dxfer_direction) {
|
||||
default:
|
||||
return -EINVAL;
|
||||
case SG_DXFER_TO_DEV:
|
||||
writing = 1;
|
||||
break;
|
||||
case SG_DXFER_TO_FROM_DEV:
|
||||
case SG_DXFER_FROM_DEV:
|
||||
break;
|
||||
}
|
||||
if (hdr->flags & SG_FLAG_Q_AT_HEAD)
|
||||
at_head = 1;
|
||||
|
||||
ret = -ENOMEM;
|
||||
rq = blk_get_request(q, writing ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
|
||||
if (IS_ERR(rq))
|
||||
return PTR_ERR(rq);
|
||||
req = scsi_req(rq);
|
||||
|
||||
if (hdr->cmd_len > BLK_MAX_CDB) {
|
||||
req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
|
||||
if (!req->cmd)
|
||||
goto out_put_request;
|
||||
}
|
||||
|
||||
ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
|
||||
if (ret < 0)
|
||||
goto out_free_cdb;
|
||||
|
||||
ret = 0;
|
||||
if (hdr->iovec_count) {
|
||||
struct iov_iter i;
|
||||
struct iovec *iov = NULL;
|
||||
|
||||
ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
|
||||
hdr->iovec_count, 0, &iov, &i);
|
||||
if (ret < 0)
|
||||
goto out_free_cdb;
|
||||
|
||||
/* SG_IO howto says that the shorter of the two wins */
|
||||
iov_iter_truncate(&i, hdr->dxfer_len);
|
||||
|
||||
ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
|
||||
kfree(iov);
|
||||
} else if (hdr->dxfer_len)
|
||||
ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
|
||||
GFP_KERNEL);
|
||||
|
||||
if (ret)
|
||||
goto out_free_cdb;
|
||||
|
||||
bio = rq->bio;
|
||||
req->retries = 0;
|
||||
|
||||
start_time = jiffies;
|
||||
|
||||
blk_execute_rq(bd_disk, rq, at_head);
|
||||
|
||||
hdr->duration = jiffies_to_msecs(jiffies - start_time);
|
||||
|
||||
ret = blk_complete_sghdr_rq(rq, hdr, bio);
|
||||
|
||||
out_free_cdb:
|
||||
scsi_req_free_cmd(req);
|
||||
out_put_request:
|
||||
blk_put_request(rq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
|
||||
* @q: request queue to send scsi commands down
|
||||
* @disk: gendisk to operate on (option)
|
||||
* @mode: mode used to open the file through which the ioctl has been
|
||||
* submitted
|
||||
* @sic: userspace structure describing the command to perform
|
||||
*
|
||||
* Send down the scsi command described by @sic to the device below
|
||||
* the request queue @q. If @file is non-NULL it's used to perform
|
||||
* fine-grained permission checks that allow users to send down
|
||||
* non-destructive SCSI commands. If the caller has a struct gendisk
|
||||
* available it should be passed in as @disk to allow the low level
|
||||
* driver to use the information contained in it. A non-NULL @disk
|
||||
* is only allowed if the caller knows that the low level driver doesn't
|
||||
* need it (e.g. in the scsi subsystem).
|
||||
*
|
||||
* Notes:
|
||||
* - This interface is deprecated - users should use the SG_IO
|
||||
* interface instead, as this is a more flexible approach to
|
||||
* performing SCSI commands on a device.
|
||||
* - The SCSI command length is determined by examining the 1st byte
|
||||
* of the given command. There is no way to override this.
|
||||
* - Data transfers are limited to PAGE_SIZE
|
||||
* - The length (x + y) must be at least OMAX_SB_LEN bytes long to
|
||||
* accommodate the sense buffer when an error occurs.
|
||||
* The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
|
||||
* old code will not be surprised.
|
||||
* - If a Unix error occurs (e.g. ENOMEM) then the user will receive
|
||||
* a negative return and the Unix error code in 'errno'.
|
||||
* If the SCSI command succeeds then 0 is returned.
|
||||
* Positive numbers returned are the compacted SCSI error codes (4
|
||||
* bytes in one int) where the lowest byte is the SCSI status.
|
||||
*/
|
||||
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
||||
struct scsi_ioctl_command __user *sic)
|
||||
{
|
||||
enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
|
||||
struct request *rq;
|
||||
struct scsi_request *req;
|
||||
int err;
|
||||
unsigned int in_len, out_len, bytes, opcode, cmdlen;
|
||||
char *buffer = NULL;
|
||||
|
||||
if (!sic)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* get in an out lengths, verify they don't exceed a page worth of data
|
||||
*/
|
||||
if (get_user(in_len, &sic->inlen))
|
||||
return -EFAULT;
|
||||
if (get_user(out_len, &sic->outlen))
|
||||
return -EFAULT;
|
||||
if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
if (get_user(opcode, sic->data))
|
||||
return -EFAULT;
|
||||
|
||||
bytes = max(in_len, out_len);
|
||||
if (bytes) {
|
||||
buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
}
|
||||
|
||||
rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
|
||||
if (IS_ERR(rq)) {
|
||||
err = PTR_ERR(rq);
|
||||
goto error_free_buffer;
|
||||
}
|
||||
req = scsi_req(rq);
|
||||
|
||||
cmdlen = COMMAND_SIZE(opcode);
|
||||
|
||||
/*
|
||||
* get command and data to send to device, if any
|
||||
*/
|
||||
err = -EFAULT;
|
||||
req->cmd_len = cmdlen;
|
||||
if (copy_from_user(req->cmd, sic->data, cmdlen))
|
||||
goto error;
|
||||
|
||||
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
|
||||
goto error;
|
||||
|
||||
err = blk_verify_command(req->cmd, mode);
|
||||
if (err)
|
||||
goto error;
|
||||
|
||||
/* default. possible overriden later */
|
||||
req->retries = 5;

	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		req->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		req->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes) {
		err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
		if (err)
			goto error;
	}

	blk_execute_rq(disk, rq, 0);

	err = req->result & 0xff;	/* only 8 bit SCSI status */
	if (err) {
		if (req->sense_len && req->sense) {
			bytes = (OMAX_SB_LEN > req->sense_len) ?
				req->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, req->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}

error:
	blk_put_request(rq);

error_free_buffer:
	kfree(buffer);

	return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);

/* Send basic block requests */
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	scsi_req(rq)->cmd[0] = cmd;
	scsi_req(rq)->cmd[4] = data;
	scsi_req(rq)->cmd_len = 6;
	blk_execute_rq(bd_disk, rq, 0);
	err = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	return err;
}

static inline int blk_send_start_stop(struct request_queue *q,
				      struct gendisk *bd_disk, int data)
{
	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}

int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
{
#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sg_io_hdr hdr32 = {
			.interface_id = hdr->interface_id,
			.dxfer_direction = hdr->dxfer_direction,
			.cmd_len = hdr->cmd_len,
			.mx_sb_len = hdr->mx_sb_len,
			.iovec_count = hdr->iovec_count,
			.dxfer_len = hdr->dxfer_len,
			.dxferp = (uintptr_t)hdr->dxferp,
			.cmdp = (uintptr_t)hdr->cmdp,
			.sbp = (uintptr_t)hdr->sbp,
			.timeout = hdr->timeout,
			.flags = hdr->flags,
			.pack_id = hdr->pack_id,
			.usr_ptr = (uintptr_t)hdr->usr_ptr,
			.status = hdr->status,
			.masked_status = hdr->masked_status,
			.msg_status = hdr->msg_status,
			.sb_len_wr = hdr->sb_len_wr,
			.host_status = hdr->host_status,
			.driver_status = hdr->driver_status,
			.resid = hdr->resid,
			.duration = hdr->duration,
			.info = hdr->info,
		};

		if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
			return -EFAULT;

		return 0;
	}
#endif

	if (copy_to_user(argp, hdr, sizeof(*hdr)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(put_sg_io_hdr);

int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
{
#ifdef CONFIG_COMPAT
	struct compat_sg_io_hdr hdr32;

	if (in_compat_syscall()) {
		if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
			return -EFAULT;

		*hdr = (struct sg_io_hdr) {
			.interface_id = hdr32.interface_id,
			.dxfer_direction = hdr32.dxfer_direction,
			.cmd_len = hdr32.cmd_len,
			.mx_sb_len = hdr32.mx_sb_len,
			.iovec_count = hdr32.iovec_count,
			.dxfer_len = hdr32.dxfer_len,
			.dxferp = compat_ptr(hdr32.dxferp),
			.cmdp = compat_ptr(hdr32.cmdp),
			.sbp = compat_ptr(hdr32.sbp),
			.timeout = hdr32.timeout,
			.flags = hdr32.flags,
			.pack_id = hdr32.pack_id,
			.usr_ptr = compat_ptr(hdr32.usr_ptr),
			.status = hdr32.status,
			.masked_status = hdr32.masked_status,
			.msg_status = hdr32.msg_status,
			.sb_len_wr = hdr32.sb_len_wr,
			.host_status = hdr32.host_status,
			.driver_status = hdr32.driver_status,
			.resid = hdr32.resid,
			.duration = hdr32.duration,
			.info = hdr32.info,
		};

		return 0;
	}
#endif

	if (copy_from_user(hdr, argp, sizeof(*hdr)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(get_sg_io_hdr);

#ifdef CONFIG_COMPAT
struct compat_cdrom_generic_command {
	unsigned char cmd[CDROM_PACKET_SIZE];
	compat_caddr_t buffer;
	compat_uint_t buflen;
	compat_int_t stat;
	compat_caddr_t sense;
	unsigned char data_direction;
	unsigned char pad[3];
	compat_int_t quiet;
	compat_int_t timeout;
	compat_caddr_t unused;
};
#endif

static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc,
				      const void __user *arg)
{
#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_cdrom_generic_command cgc32;

		if (copy_from_user(&cgc32, arg, sizeof(cgc32)))
			return -EFAULT;

		*cgc = (struct cdrom_generic_command) {
			.buffer = compat_ptr(cgc32.buffer),
			.buflen = cgc32.buflen,
			.stat = cgc32.stat,
			.sense = compat_ptr(cgc32.sense),
			.data_direction = cgc32.data_direction,
			.quiet = cgc32.quiet,
			.timeout = cgc32.timeout,
			.unused = compat_ptr(cgc32.unused),
		};
		memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE);
		return 0;
	}
#endif
	if (copy_from_user(cgc, arg, sizeof(*cgc)))
		return -EFAULT;

	return 0;
}

static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
				      void __user *arg)
{
#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_cdrom_generic_command cgc32 = {
			.buffer = (uintptr_t)(cgc->buffer),
			.buflen = cgc->buflen,
			.stat = cgc->stat,
			.sense = (uintptr_t)(cgc->sense),
			.data_direction = cgc->data_direction,
			.quiet = cgc->quiet,
			.timeout = cgc->timeout,
			.unused = (uintptr_t)(cgc->unused),
		};
		memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE);

		if (copy_to_user(arg, &cgc32, sizeof(cgc32)))
			return -EFAULT;

		return 0;
	}
#endif
	if (copy_to_user(arg, cgc, sizeof(*cgc)))
		return -EFAULT;

	return 0;
}

static int scsi_cdrom_send_packet(struct request_queue *q,
				  struct gendisk *bd_disk,
				  fmode_t mode, void __user *arg)
{
	struct cdrom_generic_command cgc;
	struct sg_io_hdr hdr;
	int err;

	err = scsi_get_cdrom_generic_arg(&cgc, arg);
	if (err)
		return err;

	cgc.timeout = clock_t_to_jiffies(cgc.timeout);
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cgc.cmd);
	hdr.dxfer_len = cgc.buflen;
	switch (cgc.data_direction) {
	case CGC_DATA_UNKNOWN:
		hdr.dxfer_direction = SG_DXFER_UNKNOWN;
		break;
	case CGC_DATA_WRITE:
		hdr.dxfer_direction = SG_DXFER_TO_DEV;
		break;
	case CGC_DATA_READ:
		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		break;
	case CGC_DATA_NONE:
		hdr.dxfer_direction = SG_DXFER_NONE;
		break;
	default:
		return -EINVAL;
	}

	hdr.dxferp = cgc.buffer;
	hdr.sbp = cgc.sense;
	if (hdr.sbp)
		hdr.mx_sb_len = sizeof(struct request_sense);
	hdr.timeout = jiffies_to_msecs(cgc.timeout);
	hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
	hdr.cmd_len = sizeof(cgc.cmd);

	err = sg_io(q, bd_disk, &hdr, mode);
	if (err == -EFAULT)
		return -EFAULT;

	if (hdr.status)
		return -EIO;

	cgc.stat = err;
	cgc.buflen = hdr.resid;
	if (scsi_put_cdrom_generic_arg(&cgc, arg))
		return -EFAULT;

	return err;
}

int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
		   unsigned int cmd, void __user *arg)
{
	int err;

	if (!q)
		return -ENXIO;

	switch (cmd) {
		/*
		 * new sgv3 interface
		 */
	case SG_GET_VERSION_NUM:
		err = sg_get_version(arg);
		break;
	case SCSI_IOCTL_GET_IDLUN:
		err = scsi_get_idlun(q, arg);
		break;
	case SCSI_IOCTL_GET_BUS_NUMBER:
		err = scsi_get_bus(q, arg);
		break;
	case SG_SET_TIMEOUT:
		err = sg_set_timeout(q, arg);
		break;
	case SG_GET_TIMEOUT:
		err = sg_get_timeout(q);
		break;
	case SG_GET_RESERVED_SIZE:
		err = sg_get_reserved_size(q, arg);
		break;
	case SG_SET_RESERVED_SIZE:
		err = sg_set_reserved_size(q, arg);
		break;
	case SG_EMULATED_HOST:
		err = sg_emulated_host(q, arg);
		break;
	case SG_IO: {
		struct sg_io_hdr hdr;

		err = get_sg_io_hdr(&hdr, arg);
		if (err)
			break;
		err = sg_io(q, bd_disk, &hdr, mode);
		if (err == -EFAULT)
			break;

		if (put_sg_io_hdr(&hdr, arg))
			err = -EFAULT;
		break;
	}
	case CDROM_SEND_PACKET:
		err = scsi_cdrom_send_packet(q, bd_disk, mode, arg);
		break;

		/*
		 * old junk scsi send command ioctl
		 */
	case SCSI_IOCTL_SEND_COMMAND:
		printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
		err = -EINVAL;
		if (!arg)
			break;

		err = sg_scsi_ioctl(q, bd_disk, mode, arg);
		break;
	case CDROMCLOSETRAY:
		err = blk_send_start_stop(q, bd_disk, 0x03);
		break;
	case CDROMEJECT:
		err = blk_send_start_stop(q, bd_disk, 0x02);
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}
EXPORT_SYMBOL(scsi_cmd_ioctl);

int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
{
	if (bd && !bdev_is_partition(bd))
		return 0;

	if (capable(CAP_SYS_RAWIO))
		return 0;

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);

int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
		       unsigned int cmd, void __user *arg)
{
	int ret;

	ret = scsi_verify_blk_ioctl(bd, cmd);
	if (ret < 0)
		return ret;

	return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
}
EXPORT_SYMBOL(scsi_cmd_blk_ioctl);

/**
 * scsi_req_init - initialize certain fields of a scsi_request structure
 * @req: Pointer to a scsi_request structure.
 * Initializes .__cmd[], .cmd, .cmd_len and .sense_len but no other members
 * of struct scsi_request.
 */
void scsi_req_init(struct scsi_request *req)
{
	memset(req->__cmd, 0, sizeof(req->__cmd));
	req->cmd = req->__cmd;
	req->cmd_len = BLK_MAX_CDB;
	req->sense_len = 0;
}
EXPORT_SYMBOL(scsi_req_init);

static int __init blk_scsi_ioctl_init(void)
{
	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
	return 0;
}
fs_initcall(blk_scsi_ioctl_init);
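The SCSI_IOCTL_SEND_COMMAND branch above warns callers to convert to SG_IO. As a sketch only (this program is not part of the tree; the /dev/sg0 path and the 5000 ms timeout are assumptions), a userspace INQUIRY issued through the SG_IO path that scsi_cmd_ioctl() dispatches could look like this:

#include <fcntl.h>
#include <scsi/sg.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte allocation */
	unsigned char data[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sg0", O_RDONLY);	/* hypothetical device node */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = sizeof(data);
	hdr.dxferp = data;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 5000;	/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0 || hdr.status)
		perror("SG_IO");
	else
		printf("vendor: %.8s\n", (char *)(data + 8));
	close(fd);
	return 0;
}

Built with any C compiler, this prints the vendor string of the first generic SCSI device instead of going through the removed SCSI_IOCTL_SEND_COMMAND path.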
@ -912,7 +912,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
	blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}

/**
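Throughout the hunks that follow, callers of cmd->request are converted to the scsi_cmd_to_rq() helper this series introduces. A sketch of that accessor, assuming the blk-mq layout in which the scsi_cmnd rides as the request's driver payload (the 5.15 definition lives in include/scsi/scsi_cmnd.h):

/* Sketch, not the patch itself: recover the request from its payload. */
static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
{
	return blk_mq_rq_from_pdu(scmd);
}

Because the request is derived by offset from the command, the redundant scsi_cmnd::request pointer can be dropped, which is the churn the pull message describes.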
@ -1893,8 +1893,7 @@ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
 */
static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
{
	if (qc->scsicmd &&
	    qc->scsicmd->request->rq_flags & RQF_QUIET)
	if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
		qc->flags |= ATA_QCFLAG_QUIET;
	return qc->flags & ATA_QCFLAG_QUIET;
}

@ -631,7 +631,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev, cmd->request->tag);
	qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
	if (qc) {
		qc->scsicmd = cmd;
		qc->scsidone = cmd->scsi_done;

@ -639,7 +639,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
		qc->sg = scsi_sglist(cmd);
		qc->n_elem = scsi_sg_count(cmd);

		if (cmd->request->rq_flags & RQF_QUIET)
		if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
			qc->flags |= ATA_QCFLAG_QUIET;
	} else {
		cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;

@ -1496,7 +1496,7 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)

static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
{
	struct request *rq = scmd->request;
	struct request *rq = scsi_cmd_to_rq(scmd);
	u32 req_blocks;

	if (!blk_rq_is_passthrough(rq))

@ -1531,7 +1531,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	struct request *rq = scmd->request;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	unsigned int tf_flags = 0;
	u64 block;

@ -3139,7 +3139,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
	 * as it modifies the DATA OUT buffer, which would corrupt user
	 * memory for SG_IO commands.
	 */
	if (unlikely(blk_rq_is_passthrough(scmd->request)))
	if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))))
		goto invalid_opcode;

	if (unlikely(scmd->cmd_len < 16)) {

@ -48,8 +48,8 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
	struct scsi_cmnd *cmd = qc->scsicmd;
	bool swap = 1;

	if (dev->class == ATA_DEV_ATA && cmd && cmd->request &&
	    !blk_rq_is_passthrough(cmd->request))
	if (dev->class == ATA_DEV_ATA && cmd &&
	    !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
		swap = 0;

	/* Transfer multiple of 2 bytes */

@ -886,6 +886,8 @@ static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else if (!device_is_registered(link->consumer))
		__device_link_del(&link->kref);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}

@ -74,7 +74,6 @@ config N64CART

config CDROM
	tristate
	select BLK_SCSI_REQUEST

config GDROM
	tristate "SEGA Dreamcast GD-ROM drive"

@ -306,7 +305,7 @@ config CDROM_PKTCDVD
	tristate "Packet writing on CD/DVD media (DEPRECATED)"
	depends on !UML
	select CDROM
	select BLK_SCSI_REQUEST
	select SCSI_COMMON
	help
	  Note: This driver is deprecated and will be removed from the
	  kernel in the near future!

@ -27,7 +27,6 @@ config PARIDE_PCD
	tristate "Parallel port ATAPI CD-ROMs"
	depends on PARIDE
	select CDROM
	select BLK_SCSI_REQUEST # only for the generic cdrom code
	help
	  This option enables the high-level driver for ATAPI CD-ROM devices
	  connected through a parallel port. If you chose to build PARIDE

@ -629,7 +629,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
	if (CDROM_CAN(CDC_MRW_W))
		cdi->exit = cdrom_mrw_exit;

	if (cdi->disk)
	if (cdi->ops->read_cdda_bpc)
		cdi->cdda_method = CDDA_BPC_FULL;
	else
		cdi->cdda_method = CDDA_OLD;

@ -2159,81 +2159,26 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
			       int lba, int nframes)
{
	struct request_queue *q = cdi->disk->queue;
	struct request *rq;
	struct scsi_request *req;
	struct bio *bio;
	unsigned int len;
	int max_frames = (queue_max_sectors(cdi->disk->queue) << 9) /
		CD_FRAMESIZE_RAW;
	int nr, ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_scsi_passthrough(q)) {
		WARN_ONCE(true,
			  "Attempt read CDDA info through a non-SCSI queue\n");
		return -EINVAL;
	}

	cdi->last_sense = 0;

	while (nframes) {
		nr = nframes;
		if (cdi->cdda_method == CDDA_BPC_SINGLE)
			nr = 1;
		if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
			nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;

		len = nr * CD_FRAMESIZE_RAW;

		rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			break;
		}
		req = scsi_req(rq);

		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
		if (ret) {
			blk_put_request(rq);
			break;
		}

		req->cmd[0] = GPCMD_READ_CD;
		req->cmd[1] = 1 << 2;
		req->cmd[2] = (lba >> 24) & 0xff;
		req->cmd[3] = (lba >> 16) & 0xff;
		req->cmd[4] = (lba >> 8) & 0xff;
		req->cmd[5] = lba & 0xff;
		req->cmd[6] = (nr >> 16) & 0xff;
		req->cmd[7] = (nr >> 8) & 0xff;
		req->cmd[8] = nr & 0xff;
		req->cmd[9] = 0xf8;

		req->cmd_len = 12;
		rq->timeout = 60 * HZ;
		bio = rq->bio;

		blk_execute_rq(cdi->disk, rq, 0);
		if (scsi_req(rq)->result) {
			struct scsi_sense_hdr sshdr;

			ret = -EIO;
			scsi_normalize_sense(req->sense, req->sense_len,
					     &sshdr);
			cdi->last_sense = sshdr.sense_key;
		}

		if (blk_rq_unmap_user(bio))
			ret = -EFAULT;
		blk_put_request(rq);
		else
			nr = min(nframes, max_frames);

		ret = cdi->ops->read_cdda_bpc(cdi, ubuf, lba, nr,
					      &cdi->last_sense);
		if (ret)
			break;

		nframes -= nr;
		lba += nr;
		ubuf += len;
		ubuf += (nr * CD_FRAMESIZE_RAW);
	}

	return ret;

@ -3357,13 +3302,6 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
	void __user *argp = (void __user *)arg;
	int ret;

	/*
	 * Try the generic SCSI command ioctl's first.
	 */
	ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
	if (ret != -ENOTTY)
		return ret;

	switch (cmd) {
	case CDROMMULTISESSION:
		return cdrom_ioctl_multisession(cdi, argp);

@ -159,7 +159,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request);
	domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.

@ -949,7 +949,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
		sector_t sector_off = mr_status.sig_err.sig_err_offset;

		sector_div(sector_off, sector_size + 8);
		*sector = scsi_get_lba(iser_task->sc) + sector_off;
		*sector = scsi_get_sector(iser_task->sc) + sector_off;

		iser_err("PI error found type %d at sector %llx "
			 "expected %x vs actual %x\n",

@ -1280,7 +1280,7 @@ static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
{
	struct srp_terminate_context *context = context_ptr;
	struct srp_target_port *target = context->srp_target;
	u32 tag = blk_mq_unique_tag(scmnd->request);
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	struct srp_request *req = scsi_cmd_priv(scmnd);

@ -2152,6 +2152,7 @@ static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct request *rq = scsi_cmd_to_rq(scmnd);
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rdma_ch *ch;
	struct srp_request *req = scsi_cmd_priv(scmnd);

@ -2166,8 +2167,8 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	WARN_ON_ONCE(rq->tag < 0);
	tag = blk_mq_unique_tag(rq);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];

	spin_lock_irqsave(&ch->lock, flags);

@ -2791,7 +2792,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;

@ -2377,7 +2377,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
		}
	}

	blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
	blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
}

/**

@ -2599,8 +2599,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
	io->fcp_cmnd_length = FCP_CMND_LEN;

	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
		io->data_block_length = scsi_cmnd->device->sector_size;
		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
		io->data_block_length = scsi_prot_interval(scsi_cmnd);
		io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
	}

	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
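The iser and zfcp hunks above replace open-coded scsi_get_lba() arithmetic with the scsi_get_sector() and scsi_prot_ref_tag() accessors. Sketches of both, assuming the 5.15 helpers in include/scsi/scsi_cmnd.h; the exact bodies here are an assumption, not quoted from this merge:

/* Sketch: sector and protection reference tag derived from the request. */
static inline sector_t scsi_get_sector(struct scsi_cmnd *scmd)
{
	return blk_rq_pos(scsi_cmd_to_rq(scmd));
}

static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
{
	struct request *rq = blk_mq_rq_from_pdu(scmd);

	return t10_pi_ref_tag(rq);
}

Deriving both values from the request keeps drivers from recomputing (and truncating) the LBA themselves, which is what the zfcp change above fixes.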
@ -1823,7 +1823,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)

	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
	    SCp->device->simple_tags) {
		slot->tag = SCp->request->tag;
		slot->tag = scsi_cmd_to_rq(SCp)->tag;
		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
		      slot->tag, slot);
	} else {

@ -1711,7 +1711,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
	if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
		blogic_info(" DMA Channel: None, ", adapter);
		if (adapter->bios_addr > 0)
			blogic_info("BIOS Address: 0x%lX, ", adapter,
			blogic_info("BIOS Address: 0x%X, ", adapter,
				    adapter->bios_addr);
		else
			blogic_info("BIOS Address: None, ", adapter);

@ -3436,7 +3436,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
	int len = 0;

	va_start(args, adapter);
	len = vsprintf(buf, fmt, args);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	if (msglevel == BLOGIC_ANNOUNCE_LEVEL) {
		static int msglines = 0;

@ -3451,7 +3451,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
		if (buf[0] != '\n' || len > 1)
			printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
	} else
		printk("%s", buf);
		pr_cont("%s", buf);
	} else {
		if (begin) {
			if (adapter != NULL && adapter->adapter_initd)

@ -3459,7 +3459,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
			else
				printk("%s%s", blogic_msglevelmap[msglevel], buf);
		} else
			printk("%s", buf);
			pr_cont("%s", buf);
	}
	begin = (buf[len - 1] == '\n');
}

@ -14,12 +14,16 @@ config RAID_ATTRS
	help
	  Provides RAID

config SCSI_COMMON
	tristate

config SCSI
	tristate "SCSI device support"
	depends on BLOCK
	select SCSI_DMA if HAS_DMA
	select SG_POOL
	select BLK_SCSI_REQUEST
	select SCSI_COMMON
	select BLK_DEV_BSG_COMMON if BLK_DEV_BSG
	help
	  If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
	  any other SCSI device under Linux, say Y and make sure that you know

@ -140,6 +144,18 @@ config CHR_DEV_SG

	  If unsure, say N.

config BLK_DEV_BSG
	bool "/dev/bsg support (SG v4)"
	depends on SCSI
	default y
	help
	  Saying Y here will enable generic SG (SCSI generic) v4 support
	  for any SCSI device.

	  This option is required by UDEV to access device serial numbers, etc.

	  If unsure, say Y.

config CHR_DEV_SCH
	tristate "SCSI media changer support"
	depends on SCSI

@ -20,7 +20,7 @@ CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
obj-$(CONFIG_PCMCIA) += pcmcia/

obj-$(CONFIG_SCSI) += scsi_mod.o
obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o
obj-$(CONFIG_SCSI_COMMON) += scsi_common.o

obj-$(CONFIG_RAID_ATTRS) += raid_class.o

@ -168,6 +168,7 @@ scsi_mod-$(CONFIG_BLK_DEBUG_FS) += scsi_debugfs.o
scsi_mod-y += scsi_trace.o scsi_logging.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o
scsi_mod-$(CONFIG_BLK_DEV_BSG) += scsi_bsg.o

hv_storvsc-y := storvsc_drv.o

@ -183,7 +184,7 @@ CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o

# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
clean-files := 53c700_d.h 53c700_u.h

$(obj)/53c700.o: $(obj)/53c700_d.h

@ -192,9 +193,11 @@ $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
quiet_cmd_bflags = GEN $@
cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@

$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h FORCE
	$(call if_changed,bflags)

targets += scsi_devinfo_tbl.c

# If you want to play with the firmware, uncomment
# GENERATE_FIRMWARE := 1

@ -778,7 +778,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
	}

#ifdef CONFIG_SUN3
	if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
	if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) {
		pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
		       instance->host_no);
		BUG();

@ -1710,7 +1710,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
				count = sun3scsi_dma_xfer_len(hostdata, cmd);

				if (count > 0) {
					if (rq_data_dir(cmd->request))
					if (cmd->sc_data_direction == DMA_TO_DEVICE)
						sun3scsi_dma_send_setup(hostdata,
									cmd->SCp.ptr, count);
					else

@ -2158,7 +2158,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
		count = sun3scsi_dma_xfer_len(hostdata, tmp);

		if (count > 0) {
			if (rq_data_dir(tmp->request))
			if (tmp->sc_data_direction == DMA_TO_DEVICE)
				sun3scsi_dma_send_setup(hostdata,
							tmp->SCp.ptr, count);
			else

@ -25,7 +25,6 @@
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>

#include <asm/unaligned.h>

@ -1505,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
	srbcmd->id = cpu_to_le32(scmd_id(cmd));
	srbcmd->lun = cpu_to_le32(cmd->device->lun);
	srbcmd->flags = cpu_to_le32(flag);
	timeout = cmd->request->timeout/HZ;
	timeout = scsi_cmd_to_rq(cmd)->timeout / HZ;
	if (timeout == 0)
		timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
	srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds

@ -224,7 +224,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
	struct fib *fibptr;

	fibptr = &dev->fibs[scmd->request->tag];
	fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
	/*
	 * Null out fields that depend on being zero at the start of
	 * each I/O

@ -7423,7 +7423,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
	 * Set the srb_tag to the command tag + 1, as
	 * srb_tag '0' is used internally by the chip.
	 */
	srb_tag = scp->request->tag + 1;
	srb_tag = scsi_cmd_to_rq(scp)->tag + 1;
	asc_scsi_q->q2.srb_tag = srb_tag;

	/*

@ -7637,7 +7637,7 @@ static int
adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
	      adv_req_t **adv_reqpp)
{
	u32 srb_tag = scp->request->tag;
	u32 srb_tag = scsi_cmd_to_rq(scp)->tag;
	adv_req_t *reqp;
	ADV_SCSI_REQ_Q *scsiqp;
	int ret;

@ -262,11 +262,12 @@ static void aha1542_free_cmd(struct scsi_cmnd *cmd)
	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		struct request *rq = scsi_cmd_to_rq(cmd);
		void *buf = acmd->data_buffer;
		struct req_iterator iter;
		struct bio_vec bv;

		rq_for_each_segment(bv, cmd->request, iter) {
		rq_for_each_segment(bv, rq, iter) {
			memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
				       bv.bv_len);
			buf += bv.bv_len;

@ -447,11 +448,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
#endif

	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		struct request *rq = scsi_cmd_to_rq(cmd);
		void *buf = acmd->data_buffer;
		struct req_iterator iter;
		struct bio_vec bv;

		rq_for_each_segment(bv, cmd->request, iter) {
		rq_for_each_segment(bv, rq, iter) {
			memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
					 bv.bv_len);
			buf += bv.bv_len;

@ -235,8 +235,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		rc = -ENOMEM;
		goto free_cmd;
		return -ENOMEM;
	}

	sge = nonembedded_sgl(wrb);

@ -269,24 +268,6 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
	/* copy the response, if any */
	if (resp_buf)
		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
	/**
	 * This is special case of NTWK_GET_IF_INFO where the size of
	 * response is not known. beiscsi_if_get_info checks the return
	 * value to free DMA buffer.
	 */
	if (rc == -EAGAIN)
		return rc;

	/**
	 * If FW is busy that is driver timed out, DMA buffer is saved with
	 * the tag, only when the cmd completes this buffer is freed.
	 */
	if (rc == -EBUSY)
		return rc;

free_cmd:
	dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
			  nonemb_cmd->va, nonemb_cmd->dma);
	return rc;
}

@ -309,6 +290,19 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
	return 0;
}

static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba,
				  struct be_dma_mem *cmd, int rc)
{
	/*
	 * If FW is busy the DMA buffer is saved with the tag. When the cmd
	 * completes this buffer is freed.
	 */
	if (rc == -EBUSY)
		return;

	dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma);
}

static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_dma_mem *tag_mem;

@ -344,8 +338,16 @@ int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
			cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
				     __beiscsi_eq_delay_compl, NULL, 0);
	rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl,
				   NULL, 0);
	if (rc) {
		/*
		 * Only free on failure. Async cmds are handled like -EBUSY
		 * where it's handled for us.
		 */
		beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
	}
	return rc;
}

/**

@ -372,6 +374,7 @@ int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
	req->hdr.version = 1;
	rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
				   &resp, sizeof(resp));
	beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
	if (rc) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,

@ -449,7 +452,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
	req->ip_addr.ip_type = ip_type;
	memcpy(req->ip_addr.addr, gw,
	       (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
	return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
	rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
	beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val);
	return rt_val;
}

int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)

@ -499,8 +504,10 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
	req = nonemb_cmd.va;
	req->ip_type = ip_type;

	return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
				     resp, sizeof(*resp));
	rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp,
				   sizeof(*resp));
	beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
	return rc;
}

static int

@ -537,6 +544,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
			    "BG_%d : failed to clear IP: rc %d status %d\n",
			    rc, req->ip_params.ip_record.status);
	}
	beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
	return rc;
}

@ -581,6 +589,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
		if (req->ip_params.ip_record.status)
			rc = -EINVAL;
	}
	beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
	return rc;
}

@ -608,6 +617,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
		reldhcp->interface_hndl = phba->interface_handle;
		reldhcp->ip_type = ip_type;
		rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
		beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
		if (rc < 0) {
			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
				    "BG_%d : failed to release existing DHCP: %d\n",

@ -689,7 +699,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
	dhcpreq->interface_hndl = phba->interface_handle;
	dhcpreq->ip_type = ip_type;
	rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);

	beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
exit:
	kfree(if_info);
	return rc;

@ -762,11 +772,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
				    "BG_%d : Memory Allocation Failure\n");

			/* Free the DMA memory for the IOCTL issuing */
			dma_free_coherent(&phba->ctrl.pdev->dev,
					  nonemb_cmd.size,
					  nonemb_cmd.va,
					  nonemb_cmd.dma);
			beiscsi_free_nemb_cmd(phba, &nonemb_cmd,
					      -ENOMEM);
			return -ENOMEM;
		}

@ -781,15 +788,13 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
				   nonemb_cmd.va)->actual_resp_len;
			ioctl_size += sizeof(struct be_cmd_req_hdr);

			/* Free the previous allocated DMA memory */
			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
					  nonemb_cmd.va,
					  nonemb_cmd.dma);

			beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
			/* Free the virtual memory */
			kfree(*if_info);
		} else
		} else {
			beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
			break;
		}
	} while (true);
	return rc;
}

@ -806,8 +811,9 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
	if (rc)
		return rc;

	return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
				     nic, sizeof(*nic));
	rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic));
	beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc);
	return rc;
}

static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,

@ -1918,7 +1918,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,

	spin_unlock(&session->back_lock);

	p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
	p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc)));
	spin_lock(&p->p_work_lock);
	if (unlikely(!p->iothread)) {
		rc = -EINVAL;

@ -618,6 +618,12 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
	return 0;
}

struct changer_element_status32 {
	int ces_type;
	compat_uptr_t ces_data;
};
#define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32)

static long ch_ioctl(struct file *file,
		     unsigned int cmd, unsigned long arg)
{

@ -748,7 +754,20 @@ static long ch_ioctl(struct file *file,

		return ch_gstatus(ch, ces.ces_type, ces.ces_data);
	}
#ifdef CONFIG_COMPAT
	case CHIOGSTATUS32:
	{
		struct changer_element_status32 ces32;

		if (copy_from_user(&ces32, argp, sizeof(ces32)))
			return -EFAULT;
		if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
			return -EINVAL;

		return ch_gstatus(ch, ces32.ces_type,
				  compat_ptr(ces32.ces_data));
	}
#endif
	case CHIOGELEM:
	{
		struct changer_get_element cge;

@ -858,59 +877,11 @@ static long ch_ioctl(struct file *file,
	}

	default:
		return scsi_ioctl(ch->device, cmd, argp);
		return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp);

	}
}

#ifdef CONFIG_COMPAT

struct changer_element_status32 {
	int ces_type;
	compat_uptr_t ces_data;
};
#define CHIOGSTATUS32 _IOW('c', 8,struct changer_element_status32)

static long ch_ioctl_compat(struct file * file,
			    unsigned int cmd, unsigned long arg)
{
	scsi_changer *ch = file->private_data;
	int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
							     file->f_flags & O_NDELAY);
	if (retval)
		return retval;

	switch (cmd) {
	case CHIOGPARAMS:
	case CHIOGVPARAMS:
	case CHIOPOSITION:
	case CHIOMOVE:
	case CHIOEXCHANGE:
	case CHIOGELEM:
	case CHIOINITELEM:
	case CHIOSVOLTAG:
		/* compatible */
		return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
	case CHIOGSTATUS32:
	{
		struct changer_element_status32 ces32;
		unsigned char __user *data;

		if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32)))
			return -EFAULT;
		if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES)
			return -EINVAL;

		data = compat_ptr(ces32.ces_data);
		return ch_gstatus(ch, ces32.ces_type, data);
	}
	default:
		return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));

	}
}
#endif

/* ------------------------------------------------------------------------ */

static int ch_probe(struct device *dev)

@ -1015,9 +986,7 @@ static const struct file_operations changer_fops = {
	.open = ch_open,
	.release = ch_release,
	.unlocked_ioctl = ch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ch_ioctl_compat,
#endif
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};
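The changer fops above drop the hand-rolled ch_ioctl_compat() in favour of compat_ptr_ioctl(), which is safe only because ch_ioctl() now handles the 32-bit CHIOGSTATUS32 layout itself via in_compat_syscall(). For reference, the generic helper behaves roughly like this (a sketch after fs/ioctl.c, not part of this patch):

/* Sketch: convert the compat pointer, then defer to the native handler. */
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}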
@ -1786,7 +1786,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
|
|||
struct csio_scsi_qset *sqset;
|
||||
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
|
||||
|
||||
sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
|
||||
sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))];
|
||||
|
||||
nr = fc_remote_port_chkready(rport);
|
||||
if (nr) {
|
||||
|
@ -1989,13 +1989,13 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
|
|||
csio_info(hw,
|
||||
"Aborted SCSI command to (%d:%llu) tag %u\n",
|
||||
cmnd->device->id, cmnd->device->lun,
|
||||
cmnd->request->tag);
|
||||
scsi_cmd_to_rq(cmnd)->tag);
|
||||
return SUCCESS;
|
||||
} else {
|
||||
csio_info(hw,
|
||||
"Failed to abort SCSI command, (%d:%llu) tag %u\n",
|
||||
cmnd->device->id, cmnd->device->lun,
|
||||
cmnd->request->tag);
|
||||
scsi_cmd_to_rq(cmnd)->tag);
|
||||
return FAILED;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -433,7 +433,7 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
|
|||
hwq = afu->hwq_rr_count++ % afu->num_hwqs;
|
||||
break;
|
||||
case HWQ_MODE_TAG:
|
||||
tag = blk_mq_unique_tag(scp->request);
|
||||
tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
|
||||
hwq = blk_mq_unique_tag_to_hwq(tag);
|
||||
break;
|
||||
case HWQ_MODE_CPU:
|
||||
|
|
|
@ -652,7 +652,7 @@ static int adpt_abort(struct scsi_cmnd * cmd)
|
|||
msg[2] = 0;
|
||||
msg[3]= 0;
|
||||
/* Add 1 to avoid firmware treating it as invalid command */
|
||||
msg[4] = cmd->request->tag + 1;
|
||||
msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
|
||||
if (pHba->host)
|
||||
spin_lock_irq(pHba->host->host_lock);
|
||||
rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
|
||||
|
@ -2236,7 +2236,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
|
|||
msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
|
||||
msg[2] = 0;
|
||||
/* Add 1 to avoid firmware treating it as invalid command */
|
||||
msg[3] = cmd->request->tag + 1;
|
||||
msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
|
||||
// Our cards use the transaction context as the tag for queueing
|
||||
// Adaptec/DPT Private stuff
|
||||
msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
|
||||
|
|
|
@ -780,7 +780,7 @@ efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
|
|||
{
|
||||
struct efct_lio_vport *lio_vport;
|
||||
struct efct *efct;
|
||||
int ret = -1;
|
||||
int ret;
|
||||
u64 p_wwpn, npiv_wwpn, npiv_wwnn;
|
||||
char *p, *pbuf, tmp[128];
|
||||
struct efct_lio_vport_list_t *vport_list;
|
||||
|
|
|
@ -107,7 +107,7 @@ static void fnic_cleanup_io(struct fnic *fnic);
|
|||
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
|
||||
struct scsi_cmnd *sc)
|
||||
{
|
||||
u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);
|
||||
u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
|
||||
|
||||
return &fnic->io_req_lock[hash];
|
||||
}
|
||||
|
@ -390,7 +390,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
|
|||
(rp->flags & FC_RP_FLAGS_RETRY))
|
||||
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
|
||||
|
||||
fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
|
||||
fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
|
||||
0, exch_flags, io_req->sgl_cnt,
|
||||
SCSI_SENSE_BUFFERSIZE,
|
||||
io_req->sgl_list_pa,
|
||||
|
@ -422,6 +422,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
|
|||
*/
|
||||
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
const int tag = scsi_cmd_to_rq(sc)->tag;
|
||||
struct fc_lport *lp = shost_priv(sc->device->host);
|
||||
struct fc_rport *rport;
|
||||
struct fnic_io_req *io_req = NULL;
|
||||
|
@ -511,8 +512,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
|
|||
sg_count = scsi_dma_map(sc);
|
||||
if (sg_count < 0) {
|
||||
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
|
||||
sc->request->tag, sc, 0, sc->cmnd[0],
|
||||
sg_count, CMD_STATE(sc));
|
||||
tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
|
||||
mempool_free(io_req, fnic->io_req_pool);
|
||||
goto out;
|
||||
}
|
||||
|
@ -571,7 +571,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
|
|||
* refetch the pointer under the lock.
|
||||
*/
|
||||
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
|
||||
sc->request->tag, sc, 0, 0, 0,
|
||||
tag, sc, 0, 0, 0,
|
||||
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
|
||||
io_req = (struct fnic_io_req *)CMD_SP(sc);
|
||||
CMD_SP(sc) = NULL;
|
||||
|
@ -603,8 +603,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
|
|||
sc->cmnd[5]);
|
||||
|
||||
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
|
||||
sc->request->tag, sc, io_req,
|
||||
sg_count, cmd_trace,
|
||||
tag, sc, io_req, sg_count, cmd_trace,
|
||||
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
|
||||
|
||||
/* if only we issued IO, will we have the io lock */
|
||||
|
@ -1364,6 +1363,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
|
|||
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
|
||||
bool reserved)
|
||||
{
|
||||
const int tag = scsi_cmd_to_rq(sc)->tag;
|
||||
struct fnic *fnic = data;
|
||||
struct fnic_io_req *io_req;
|
||||
unsigned long flags = 0;
|
||||
|
@ -1371,7 +1371,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
|
|||
unsigned long start_time = 0;
|
||||
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
|
||||
|
||||
io_lock = fnic_io_lock_tag(fnic, sc->request->tag);
|
||||
io_lock = fnic_io_lock_tag(fnic, tag);
|
||||
spin_lock_irqsave(io_lock, flags);
|
||||
|
||||
io_req = (struct fnic_io_req *)CMD_SP(sc);
|
||||
|
@ -1413,7 +1413,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
|
|||
sc->result = DID_TRANSPORT_DISRUPTED << 16;
|
||||
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
"fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
|
||||
sc->request->tag, sc, (jiffies - start_time));
|
||||
tag, sc, jiffies - start_time);
|
||||
|
||||
if (atomic64_read(&fnic->io_cmpl_skip))
|
||||
atomic64_dec(&fnic->io_cmpl_skip);
|
||||
|
@ -1425,10 +1425,10 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
|
|||
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
|
||||
shost_printk(KERN_ERR, fnic->lport->host,
|
||||
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
|
||||
sc->request->tag, sc);
|
||||
tag, sc);
|
||||
|
||||
FNIC_TRACE(fnic_cleanup_io,
|
||||
sc->device->host->host_no, sc->request->tag, sc,
|
||||
sc->device->host->host_no, tag, sc,
|
||||
jiffies_to_msecs(jiffies - start_time),
|
||||
0, ((u64)sc->cmnd[0] << 32 |
|
||||
(u64)sc->cmnd[2] << 24 |
|
||||
|
@ -1566,7 +1566,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
|
|||
{
|
||||
struct fnic_rport_abort_io_iter_data *iter_data = data;
|
||||
struct fnic *fnic = iter_data->fnic;
|
||||
int abt_tag = sc->request->tag;
|
||||
int abt_tag = scsi_cmd_to_rq(sc)->tag;
|
||||
struct fnic_io_req *io_req;
|
||||
spinlock_t *io_lock;
|
||||
unsigned long flags;
|
||||
|
@ -1727,6 +1727,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
|
|||
*/
|
||||
int fnic_abort_cmd(struct scsi_cmnd *sc)
|
||||
{
|
||||
struct request *const rq = scsi_cmd_to_rq(sc);
|
||||
struct fc_lport *lp;
|
||||
struct fnic *fnic;
|
||||
struct fnic_io_req *io_req = NULL;
|
||||
|
@ -1741,7 +1742,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
|||
struct abort_stats *abts_stats;
|
||||
struct terminate_stats *term_stats;
|
||||
enum fnic_ioreq_state old_ioreq_state;
|
||||
int tag;
|
||||
const int tag = rq->tag;
|
||||
unsigned long abt_issued_time;
|
||||
DECLARE_COMPLETION_ONSTACK(tm_done);
|
||||
|
||||
|
@ -1757,7 +1758,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
|||
term_stats = &fnic->fnic_stats.term_stats;
|
||||
|
||||
rport = starget_to_rport(scsi_target(sc->device));
|
||||
tag = sc->request->tag;
|
||||
FNIC_SCSI_DBG(KERN_DEBUG,
|
||||
fnic->lport->host,
|
||||
"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
|
||||
|
@ -1842,8 +1842,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
|||
/* Now queue the abort command to firmware */
|
||||
int_to_scsilun(sc->device->lun, &fc_lun);
|
||||
|
||||
if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
|
||||
fc_lun.scsi_lun, io_req)) {
|
||||
if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
|
||||
io_req)) {
|
||||
spin_lock_irqsave(io_lock, flags);
|
||||
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
|
||||
CMD_STATE(sc) = old_ioreq_state;
|
||||
|
@ -1943,8 +1943,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
|||
}
|
||||
|
||||
fnic_abort_cmd_end:
|
||||
FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
|
||||
sc->request->tag, sc,
|
||||
FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
|
||||
jiffies_to_msecs(jiffies - start_time),
|
||||
0, ((u64)sc->cmnd[0] << 32 |
|
||||
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
|
||||
|
@ -1994,7 +1993,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
|
|||
/* fill in the lun info */
|
||||
int_to_scsilun(sc->device->lun, &fc_lun);
|
||||
|
||||
fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
|
||||
fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
|
||||
0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
|
||||
fc_lun.scsi_lun, io_req->port_id,
|
||||
fnic->config.ra_tov, fnic->config.ed_tov);
|
||||
|
@ -2025,7 +2024,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
|
|||
struct fnic_pending_aborts_iter_data *iter_data = data;
|
||||
struct fnic *fnic = iter_data->fnic;
|
||||
struct scsi_device *lun_dev = iter_data->lun_dev;
|
||||
int abt_tag = sc->request->tag;
|
||||
int abt_tag = scsi_cmd_to_rq(sc)->tag;
|
||||
struct fnic_io_req *io_req;
|
||||
spinlock_t *io_lock;
|
||||
unsigned long flags;
|
||||
|
@ -2206,14 +2205,15 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
|
|||
static inline int
|
||||
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
|
||||
{
|
||||
struct request_queue *q = sc->request->q;
|
||||
struct request *rq = scsi_cmd_to_rq(sc);
|
||||
struct request_queue *q = rq->q;
|
||||
struct request *dummy;
|
||||
|
||||
dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
|
||||
if (IS_ERR(dummy))
|
||||
return SCSI_NO_TAG;
|
||||
|
||||
sc->tag = sc->request->tag = dummy->tag;
|
||||
rq->tag = dummy->tag;
|
||||
sc->host_scribble = (unsigned char *)dummy;
|
||||
|
||||
return dummy->tag;
|
||||
|
@ -2238,6 +2238,7 @@ fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
|
|||
*/
|
||||
int fnic_device_reset(struct scsi_cmnd *sc)
|
||||
{
|
||||
struct request *rq = scsi_cmd_to_rq(sc);
|
||||
struct fc_lport *lp;
|
||||
struct fnic *fnic;
|
||||
struct fnic_io_req *io_req = NULL;
|
||||
|
@ -2250,7 +2251,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
|
|||
struct scsi_lun fc_lun;
|
||||
struct fnic_stats *fnic_stats;
|
||||
struct reset_stats *reset_stats;
|
||||
int tag = 0;
|
||||
int tag = rq->tag;
|
||||
DECLARE_COMPLETION_ONSTACK(tm_done);
|
||||
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
|
||||
bool new_sc = 0;
|
||||
|
@ -2284,7 +2285,6 @@ int fnic_device_reset(struct scsi_cmnd *sc)
|
|||
CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
|
||||
/* Allocate tag if not present */
|
||||
|
||||
tag = sc->request->tag;
|
||||
if (unlikely(tag < 0)) {
|
||||
/*
|
||||
* Really should fix the midlayer to pass in a proper
|
||||
|
@ -2458,8 +2458,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
|
|||
}
|
||||
|
||||
fnic_device_reset_end:
|
||||
FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
|
||||
sc->request->tag, sc,
|
||||
FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
|
||||
jiffies_to_msecs(jiffies - start_time),
|
||||
0, ((u64)sc->cmnd[0] << 32 |
|
||||
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
|
||||
|
|
|
@ -185,7 +185,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
|
|||
void *bitmap = hisi_hba->slot_index_tags;
|
||||
|
||||
if (scsi_cmnd)
|
||||
return scsi_cmnd->request->tag;
|
||||
return scsi_cmd_to_rq(scsi_cmnd)->tag;
|
||||
|
||||
spin_lock(&hisi_hba->lock);
|
||||
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
|
||||
|
@ -449,7 +449,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
|
|||
unsigned int dq_index;
|
||||
u32 blk_tag;
|
||||
|
||||
blk_tag = blk_mq_unique_tag(scmd->request);
|
||||
blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
|
||||
dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
|
||||
*dq_pointer = dq = &hisi_hba->dq[dq_index];
|
||||
} else {
|
||||
|
|
|
@ -1153,7 +1153,7 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
|
|||
{
|
||||
unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
|
||||
unsigned int interval = scsi_prot_interval(scsi_cmnd);
|
||||
u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
|
||||
u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd));
|
||||
|
||||
switch (prot_op) {
|
||||
case SCSI_PROT_READ_INSERT:
|
||||
|
|
|
@ -5686,7 +5686,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
|
|||
/* Get the ptr to our adapter structure out of cmd->host. */
|
||||
h = sdev_to_hba(cmd->device);
|
||||
|
||||
BUG_ON(cmd->request->tag < 0);
|
||||
BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0);
|
||||
|
||||
dev = cmd->device->hostdata;
|
||||
if (!dev) {
|
||||
|
@ -5729,7 +5729,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
|
|||
* and is therefore a brand-new command.
|
||||
*/
|
||||
if (likely(cmd->retries == 0 &&
|
||||
!blk_rq_is_passthrough(cmd->request) &&
|
||||
!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) &&
|
||||
h->acciopath_status)) {
|
||||
/* Submit with the retry_pending flag unset. */
|
||||
rc = hpsa_ioaccel_submit(h, c, cmd, false);
|
||||
|
@ -5894,7 +5894,7 @@ static int hpsa_scsi_add_host(struct ctlr_info *h)
|
|||
*/
|
||||
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
|
||||
{
|
||||
int idx = scmd->request->tag;
|
||||
int idx = scsi_cmd_to_rq(scmd)->tag;
|
||||
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
|
|
|
@ -1926,7 +1926,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
|
|||
struct ibmvfc_cmd *vfc_cmd;
|
||||
struct ibmvfc_fcp_cmd_iu *iu;
|
||||
struct ibmvfc_event *evt;
|
||||
u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
|
||||
u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
|
||||
u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
|
||||
u16 scsi_channel;
|
||||
int rc;
|
||||
|
@ -1956,7 +1956,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
|
|||
memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
|
||||
|
||||
if (cmnd->flags & SCMD_TAGGED) {
|
||||
vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
|
||||
vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
|
||||
iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
|
||||
}
|
||||
|
||||
|
@ -3292,14 +3292,18 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
|
|||
int done = 0;
|
||||
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
if (time >= (init_timeout * HZ)) {
|
||||
if (!vhost->scan_timeout)
|
||||
done = 1;
|
||||
else if (time >= (vhost->scan_timeout * HZ)) {
|
||||
dev_info(vhost->dev, "Scan taking longer than %d seconds, "
|
||||
"continuing initialization\n", init_timeout);
|
||||
"continuing initialization\n", vhost->scan_timeout);
|
||||
done = 1;
|
||||
}
|
||||
|
||||
if (vhost->scan_complete)
|
||||
if (vhost->scan_complete) {
|
||||
vhost->scan_timeout = init_timeout;
|
||||
done = 1;
|
||||
}
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
return done;
|
||||
}
|
||||
|
@ -6084,6 +6088,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
|
|||
vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
|
||||
vhost->using_channels = 0;
|
||||
vhost->do_enquiry = 1;
|
||||
vhost->scan_timeout = 0;
|
||||
|
||||
strcpy(vhost->partition_name, "UNKNOWN");
|
||||
init_waitqueue_head(&vhost->work_wait_q);
|
||||
|
|
|
@ -876,6 +876,7 @@ struct ibmvfc_host {
|
|||
int reinit;
|
||||
int delay_init;
|
||||
int scan_complete;
|
||||
int scan_timeout;
|
||||
int logged_in;
|
||||
int mq_enabled;
|
||||
int using_channels;
|
||||
|
|
|
@ -1072,7 +1072,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
|
|||
init_event_struct(evt_struct,
|
||||
handle_cmd_rsp,
|
||||
VIOSRP_SRP_FORMAT,
|
||||
cmnd->request->timeout/HZ);
|
||||
scsi_cmd_to_rq(cmnd)->timeout / HZ);
|
||||
|
||||
evt_struct->cmnd = cmnd;
|
||||
evt_struct->cmnd_done = done;
|
||||
|
|
|
@ -3735,7 +3735,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
|
|||
scb->cmd.dcdb.segment_4G = 0;
|
||||
scb->cmd.dcdb.enhanced_sg = 0;
|
||||
|
||||
TimeOut = scb->scsi_cmd->request->timeout;
|
||||
TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout;
|
||||
|
||||
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
|
||||
if (!scb->sg_len) {
|
||||
|
|
|
@ -341,7 +341,7 @@ static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
|
|||
tc->reserved_E8_0 = 0;
|
||||
|
||||
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
|
||||
tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
|
||||
tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd);
|
||||
else if (type & SCSI_PROT_DIF_TYPE3)
|
||||
tc->ref_tag_seed_gen = 0;
|
||||
}
|
||||
|
@ -369,7 +369,7 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
|
|||
tc->app_tag_gen = 0;
|
||||
|
||||
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
|
||||
tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
|
||||
tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd);
|
||||
else if (type & SCSI_PROT_DIF_TYPE3)
|
||||
tc->ref_tag_seed_verify = 0;
|
||||
|
||||
|
|
|
@@ -10,7 +10,6 @@ config SCSI_SAS_LIBSAS
 	tristate "SAS Domain Transport Attributes"
 	depends on SCSI
 	select SCSI_SAS_ATTRS
-	select BLK_DEV_BSGLIB
 	help
 	  This provides transport specific helpers for SAS drivers which
 	  use the domain device construct (like the aic94xx).

@@ -18,4 +18,4 @@ libsas-y += sas_init.o \
 libsas-$(CONFIG_SCSI_SAS_ATA) +=	sas_ata.o
 libsas-$(CONFIG_SCSI_SAS_HOST_SMP) +=	sas_host_smp.o

-ccflags-y := -DDEBUG
+ccflags-y := -DDEBUG -I$(srctree)/drivers/scsi

@@ -20,8 +20,8 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
 #include <scsi/scsi_eh.h>

 static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
@@ -596,7 +596,7 @@ void sas_ata_task_abort(struct sas_task *task)

 	/* Bounce SCSI-initiated commands to the SCSI EH */
 	if (qc->scsicmd) {
-		blk_abort_request(qc->scsicmd->request);
+		blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
 		return;
 	}

@@ -16,7 +16,7 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
 #include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"

 /* ---------- Basic task processing for discovery purposes ---------- */

@@ -18,7 +18,7 @@
 #include <scsi/sas_ata.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"

 static int  sas_discover_expander(struct domain_device *dev);
 static int  sas_configure_routing(struct domain_device *dev, u8 *sas_addr);

@@ -14,7 +14,7 @@

 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"

 static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
 				  u8 phy_id)
@@ -19,7 +19,7 @@

 #include "sas_internal.h"

-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"

 static struct kmem_cache *sas_task_cache;
 static struct kmem_cache *sas_event_cache;

@@ -10,7 +10,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"

 /* ---------- Phy events ---------- */

@@ -10,7 +10,7 @@

 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
-#include "../scsi_sas_internal.h"
+#include "scsi_sas_internal.h"

 static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
 {
@@ -22,9 +22,9 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
 #include <scsi/sas_ata.h>
-#include "../scsi_sas_internal.h"
-#include "../scsi_transport_api.h"
-#include "../scsi_priv.h"
+#include "scsi_sas_internal.h"
+#include "scsi_transport_api.h"
+#include "scsi_priv.h"

 #include <linux/err.h>
 #include <linux/blkdev.h>
@@ -908,7 +908,7 @@ void sas_task_abort(struct sas_task *task)
 	if (dev_is_sata(task->dev))
 		sas_ata_task_abort(task);
 	else
-		blk_abort_request(sc->request);
+		blk_abort_request(scsi_cmd_to_rq(sc));
 }

 int sas_slave_alloc(struct scsi_device *sdev)

@@ -114,6 +114,12 @@ struct lpfc_sli2_slim;
 #define LPFC_MBX_NO_WAIT	0
 #define LPFC_MBX_WAIT		1

+#define LPFC_CFG_PARAM_MAGIC_NUM	0xFEAA0005
+#define LPFC_PORT_CFG_NAME		"/cfg/port.cfg"
+
+#define lpfc_rangecheck(val, min, max)  \
+	((uint)(val) >= (uint)(min) && (val) <= (max))
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT    = 0x2
@@ -403,6 +409,160 @@ struct lpfc_trunk_link {
 		     link3;
 };

+/* Format of congestion module parameters */
+struct lpfc_cgn_param {
+	uint32_t cgn_param_magic;
+	uint8_t  cgn_param_version;	/* version 1 */
+	uint8_t  cgn_param_mode;	/* 0=off 1=managed 2=monitor only */
+#define LPFC_CFG_OFF		0
+#define LPFC_CFG_MANAGED	1
+#define LPFC_CFG_MONITOR	2
+	uint8_t  cgn_rsvd1;
+	uint8_t  cgn_rsvd2;
+	uint8_t  cgn_param_level0;
+	uint8_t  cgn_param_level1;
+	uint8_t  cgn_param_level2;
+	uint8_t  byte11;
+	uint8_t  byte12;
+	uint8_t  byte13;
+	uint8_t  byte14;
+	uint8_t  byte15;
+};
+
+/* Max number of days of congestion data */
+#define LPFC_MAX_CGN_DAYS	10
+
+/* Format of congestion buffer info
+ * This structure defines memory thats allocated and registered with
+ * the HBA firmware. When adding or removing fields from this structure
+ * the alignment must match the HBA firmware.
+ */
+
+struct lpfc_cgn_info {
+	/* Header */
+	__le16   cgn_info_size;		/* is sizeof(struct lpfc_cgn_info) */
+	uint8_t  cgn_info_version;	/* represents format of structure */
+#define LPFC_CGN_INFO_V1	1
+#define LPFC_CGN_INFO_V2	2
+#define LPFC_CGN_INFO_V3	3
+	uint8_t  cgn_info_mode;		/* 0=off 1=managed 2=monitor only */
+	uint8_t  cgn_info_detect;
+	uint8_t  cgn_info_action;
+	uint8_t  cgn_info_level0;
+	uint8_t  cgn_info_level1;
+	uint8_t  cgn_info_level2;
+
+	/* Start Time */
+	uint8_t  cgn_info_month;
+	uint8_t  cgn_info_day;
+	uint8_t  cgn_info_year;
+	uint8_t  cgn_info_hour;
+	uint8_t  cgn_info_minute;
+	uint8_t  cgn_info_second;
+
+	/* minute / hours / daily indices */
+	uint8_t  cgn_index_minute;
+	uint8_t  cgn_index_hour;
+	uint8_t  cgn_index_day;
+
+	__le16   cgn_warn_freq;
+	__le16   cgn_alarm_freq;
+	__le16   cgn_lunq;
+	uint8_t  cgn_pad1[8];
+
+	/* Driver Information */
+	__le16   cgn_drvr_min[60];
+	__le32   cgn_drvr_hr[24];
+	__le32   cgn_drvr_day[LPFC_MAX_CGN_DAYS];
+
+	/* Congestion Warnings */
+	__le16   cgn_warn_min[60];
+	__le32   cgn_warn_hr[24];
+	__le32   cgn_warn_day[LPFC_MAX_CGN_DAYS];
+
+	/* Latency Information */
+	__le32   cgn_latency_min[60];
+	__le32   cgn_latency_hr[24];
+	__le32   cgn_latency_day[LPFC_MAX_CGN_DAYS];
+
+	/* Bandwidth Information */
+	__le16   cgn_bw_min[60];
+	__le16   cgn_bw_hr[24];
+	__le16   cgn_bw_day[LPFC_MAX_CGN_DAYS];
+
+	/* Congestion Alarms */
+	__le16   cgn_alarm_min[60];
+	__le32   cgn_alarm_hr[24];
+	__le32   cgn_alarm_day[LPFC_MAX_CGN_DAYS];
+
+	/* Start of congestion statistics */
+	uint8_t  cgn_stat_npm;		/* Notifications per minute */
+
+	/* Start Time */
+	uint8_t  cgn_stat_month;
+	uint8_t  cgn_stat_day;
+	uint8_t  cgn_stat_year;
+	uint8_t  cgn_stat_hour;
+	uint8_t  cgn_stat_minute;
+	uint8_t  cgn_pad2[2];
+
+	__le32   cgn_notification;
+	__le32   cgn_peer_notification;
+	__le32   link_integ_notification;
+	__le32   delivery_notification;
+
+	uint8_t  cgn_stat_cgn_month;	/* Last congestion notification FPIN */
+	uint8_t  cgn_stat_cgn_day;
+	uint8_t  cgn_stat_cgn_year;
+	uint8_t  cgn_stat_cgn_hour;
+	uint8_t  cgn_stat_cgn_min;
+	uint8_t  cgn_stat_cgn_sec;
+
+	uint8_t  cgn_stat_peer_month;	/* Last peer congestion FPIN */
+	uint8_t  cgn_stat_peer_day;
+	uint8_t  cgn_stat_peer_year;
+	uint8_t  cgn_stat_peer_hour;
+	uint8_t  cgn_stat_peer_min;
+	uint8_t  cgn_stat_peer_sec;
+
+	uint8_t  cgn_stat_lnk_month;	/* Last link integrity FPIN */
+	uint8_t  cgn_stat_lnk_day;
+	uint8_t  cgn_stat_lnk_year;
+	uint8_t  cgn_stat_lnk_hour;
+	uint8_t  cgn_stat_lnk_min;
+	uint8_t  cgn_stat_lnk_sec;
+
+	uint8_t  cgn_stat_del_month;	/* Last delivery notification FPIN */
+	uint8_t  cgn_stat_del_day;
+	uint8_t  cgn_stat_del_year;
+	uint8_t  cgn_stat_del_hour;
+	uint8_t  cgn_stat_del_min;
+	uint8_t  cgn_stat_del_sec;
+#define LPFC_CGN_STAT_SIZE	48
+#define LPFC_CGN_DATA_SIZE	(sizeof(struct lpfc_cgn_info) -  \
+				LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+
+	__le32   cgn_info_crc;
+#define LPFC_CGN_CRC32_MAGIC_NUMBER	0x1EDC6F41
+#define LPFC_CGN_CRC32_SEED		0xFFFFFFFF
+};
+
+#define LPFC_CGN_INFO_SZ	(sizeof(struct lpfc_cgn_info) -  \
+				sizeof(uint32_t))
+
+struct lpfc_cgn_stat {
+	atomic64_t total_bytes;
+	atomic64_t rcv_bytes;
+	atomic64_t rx_latency;
+#define LPFC_CGN_NOT_SENT	0xFFFFFFFFFFFFFFFFLL
+	atomic_t rx_io_cnt;
+};
+
+struct lpfc_cgn_acqe_stat {
+	atomic64_t alarm;
+	atomic64_t warn;
+};
+
 struct lpfc_vport {
 	struct lpfc_hba *phba;
 	struct list_head listentry;

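One point worth illustrating from the structure above: cgn_info_crc sits at the very end, so any checksum of the buffer must exclude the trailing 32-bit word, which is exactly what LPFC_CGN_INFO_SZ encodes. A hedged usage sketch built only from names in this hunk (lpfc_cgn_calc_crc32() is declared later in this series; its exact algorithm is not shown here):

	/* Sketch: validate a congestion info buffer against its CRC */
	static bool lpfc_cgn_info_crc_ok(struct lpfc_cgn_info *cp)
	{
		uint32_t crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
						   LPFC_CGN_CRC32_SEED);

		return crc == le32_to_cpu(cp->cgn_info_crc);
	}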
@@ -869,7 +1029,10 @@ struct lpfc_hba {
 					 * capability
 					 */
 #define HBA_FLOGI_ISSUED	0x100000 /* FLOGI was issued */
+#define HBA_CGN_RSVD1		0x200000 /* Reserved CGN flag */
+#define HBA_CGN_DAY_WRAP	0x400000 /* HBA Congestion info day wraps */
 #define HBA_DEFER_FLOGI		0x800000 /* Defer FLOGI till read_sparm cmpl */
 #define HBA_SETUP		0x1000000 /* Signifies HBA setup is completed */
 #define HBA_NEEDS_CFG_PORT	0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
 #define HBA_HBEAT_INP		0x4000000 /* mbox HBEAT is in progress */
 #define HBA_HBEAT_TMO		0x8000000 /* HBEAT initiated after timeout */

@@ -922,7 +1085,6 @@ struct lpfc_hba {
 	uint8_t  wwpn[8];
 	uint32_t RandomData[7];
 	uint8_t  fcp_embed_io;
-	uint8_t  nvme_support;	/* Firmware supports NVME */
 	uint8_t  nvmet_support;	/* driver supports NVMET */
 #define LPFC_NVMET_MAX_PORTS	32
 	uint8_t  mds_diags_support;

@@ -1121,6 +1283,7 @@ struct lpfc_hba {
 	uint32_t total_iocbq_bufs;
 	struct list_head active_rrq_list;
 	spinlock_t hbalock;
+	struct work_struct unblock_request_work; /* SCSI layer unblock IOs */

 	/* dma_mem_pools */
 	struct dma_pool *lpfc_sg_dma_buf_pool;

@@ -1194,6 +1357,8 @@ struct lpfc_hba {
 #ifdef LPFC_HDWQ_LOCK_STAT
 	struct dentry *debug_lockstat;
 #endif
+	struct dentry *debug_cgn_buffer;
+	struct dentry *debug_rx_monitor;
 	struct dentry *debug_ras_log;
 	atomic_t nvmeio_trc_cnt;
 	uint32_t nvmeio_trc_size;

@@ -1344,6 +1509,76 @@ struct lpfc_hba {
 	uint64_t ktime_seg10_min;
 	uint64_t ktime_seg10_max;
 #endif
+	/* CMF objects */
+	struct lpfc_cgn_stat __percpu *cmf_stat;
+	uint32_t cmf_interval_rate;	/* timer interval limit in ms */
+	uint32_t cmf_timer_cnt;
+#define LPFC_CMF_INTERVAL	90
+	uint64_t cmf_link_byte_count;
+	uint64_t cmf_max_line_rate;
+	uint64_t cmf_max_bytes_per_interval;
+	uint64_t cmf_last_sync_bw;
+#define LPFC_CMF_BLK_SIZE	512
+	struct hrtimer cmf_timer;
+	atomic_t cmf_bw_wait;
+	atomic_t cmf_busy;
+	atomic_t cmf_stop_io;		/* To block request and stop IO's */
+	uint32_t cmf_active_mode;
+	uint32_t cmf_info_per_interval;
+#define LPFC_MAX_CMF_INFO	32
+	struct timespec64 cmf_latency;	/* Interval congestion timestamp */
+	uint32_t cmf_last_ts;		/* Interval congestion time (ms) */
+	uint32_t cmf_active_info;
+
+	/* Signal / FPIN handling for Congestion Mgmt */
+	u8 cgn_reg_fpin;		/* Negotiated value from RDF */
+	u8 cgn_init_reg_fpin;		/* Initial value from READ_CONFIG */
+#define LPFC_CGN_FPIN_NONE	0x0
+#define LPFC_CGN_FPIN_WARN	0x1
+#define LPFC_CGN_FPIN_ALARM	0x2
+#define LPFC_CGN_FPIN_BOTH	(LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM)
+
+	u8 cgn_reg_signal;		/* Negotiated value from EDC */
+	u8 cgn_init_reg_signal;		/* Initial value from READ_CONFIG */
+	/* cgn_reg_signal and cgn_init_reg_signal use
+	 * enum fc_edc_cg_signal_cap_types
+	 */
+	u16 cgn_fpin_frequency;
+#define LPFC_FPIN_INIT_FREQ	0xffff
+	u32 cgn_sig_freq;
+	u32 cgn_acqe_cnt;
+
+	/* RX monitor handling for CMF */
+	struct rxtable_entry *rxtable;	/* RX_monitor information */
+	atomic_t rxtable_idx_head;
+#define LPFC_RXMONITOR_TABLE_IN_USE	(LPFC_MAX_RXMONITOR_ENTRY + 73)
+	atomic_t rxtable_idx_tail;
+	atomic_t rx_max_read_cnt;	/* Maximum read bytes */
+	uint64_t rx_block_cnt;
+
+	/* Congestion parameters from flash */
+	struct lpfc_cgn_param cgn_p;
+
+	/* Statistics counter for ACQE cgn alarms and warnings */
+	struct lpfc_cgn_acqe_stat cgn_acqe_stat;
+
+	/* Congestion buffer information */
+	struct lpfc_dmabuf *cgn_i;	/* Congestion Info buffer */
+	atomic_t cgn_fabric_warn_cnt;	/* Total warning cgn events for info */
+	atomic_t cgn_fabric_alarm_cnt;	/* Total alarm cgn events for info */
+	atomic_t cgn_sync_warn_cnt;	/* Total warning events for SYNC wqe */
+	atomic_t cgn_sync_alarm_cnt;	/* Total alarm events for SYNC wqe */
+	atomic_t cgn_driver_evt_cnt;	/* Total driver cgn events for fmw */
+	atomic_t cgn_latency_evt_cnt;
+	struct timespec64 cgn_daily_ts;
+	atomic64_t cgn_latency_evt;	/* Avg latency per minute */
+	unsigned long cgn_evt_timestamp;
+#define LPFC_CGN_TIMER_TO_MIN	60000 /* ms in a minute */
+	uint32_t cgn_evt_minute;
+#define LPFC_SEC_MIN		60
+#define LPFC_MIN_HOUR		60
+#define LPFC_HOUR_DAY		24
+#define LPFC_MIN_DAY		(LPFC_MIN_HOUR * LPFC_HOUR_DAY)
+
 	struct hlist_node cpuhp;	/* used for cpuhp per hba callback */
 	struct timer_list cpuhp_poll_timer;

@@ -1364,6 +1599,22 @@ struct lpfc_hba {
 	struct dbg_log_ent dbg_log[DBG_LOG_SZ];
 };

+#define LPFC_MAX_RXMONITOR_ENTRY	800
+#define LPFC_MAX_RXMONITOR_DUMP		32
+struct rxtable_entry {
+	uint64_t total_bytes;	/* Total no of read bytes requested */
+	uint64_t rcv_bytes;	/* Total no of read bytes completed */
+	uint64_t avg_io_size;
+	uint64_t avg_io_latency;/* Average io latency in microseconds */
+	uint64_t max_read_cnt;	/* Maximum read bytes */
+	uint64_t max_bytes_per_interval;
+	uint32_t cmf_busy;
+	uint32_t cmf_info;	/* CMF_SYNC_WQE info */
+	uint32_t io_cnt;
+	uint32_t timer_utilization;
+	uint32_t timer_interval;
+};
+
 static inline struct Scsi_Host *
 lpfc_shost_from_vport(struct lpfc_vport *vport)
 {

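The rxtable declared above behaves as a fixed ring of LPFC_MAX_RXMONITOR_ENTRY slots with atomic head/tail indices; LPFC_RXMONITOR_TABLE_IN_USE is an out-of-range sentinel the producer parks in the head index while an update is in flight. A simplified consumer-side sketch of that protocol (consume() is a hypothetical placeholder; the actual reader is the debugfs code in a later hunk):

	int head, i;

	do {
		head = atomic_read(&phba->rxtable_idx_head);
	} while (head == LPFC_RXMONITOR_TABLE_IN_USE);	/* update in flight */

	/* claim everything between tail and head, oldest first */
	i = atomic_xchg(&phba->rxtable_idx_tail, head);
	while (i != head) {
		consume(&phba->rxtable[i]);		/* hypothetical */
		i = (i + 1) % LPFC_MAX_RXMONITOR_ENTRY;
	}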
@@ -57,6 +57,8 @@
 #define LPFC_MIN_DEVLOSS_TMO	1
 #define LPFC_MAX_DEVLOSS_TMO	255

+#define LPFC_MAX_INFO_TMP_LEN	100
+#define LPFC_INFO_MORE_STR	"\nCould be more info...\n"
 /*
  * Write key size should be multiple of 4. If write key is changed
  * make sure that library write key is also changed.
@@ -112,6 +114,186 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
 	return;
 }

+static ssize_t
+lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_cgn_info *cp = NULL;
+	struct lpfc_cgn_stat *cgs;
+	int len = 0;
+	int cpu;
+	u64 rcv, total;
+	char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
+
+	if (phba->cgn_i)
+		cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
+
+	scnprintf(tmp, sizeof(tmp),
+		  "Congestion Mgmt Info: E2Eattr %d Ver %d "
+		  "CMF %d cnt %d\n",
+		  phba->sli4_hba.pc_sli4_params.mi_ver,
+		  cp ? cp->cgn_info_version : 0,
+		  phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
+
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	if (!phba->sli4_hba.pc_sli4_params.cmf)
+		goto buffer_done;
+
+	switch (phba->cgn_init_reg_signal) {
+	case EDC_CG_SIG_WARN_ONLY:
+		scnprintf(tmp, sizeof(tmp),
+			  "Register: Init:  Signal:WARN  ");
+		break;
+	case EDC_CG_SIG_WARN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "Register: Init:  Signal:WARN|ALARM  ");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "Register: Init:  Signal:NONE  ");
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	switch (phba->cgn_init_reg_fpin) {
+	case LPFC_CGN_FPIN_WARN:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN\n");
+		break;
+	case LPFC_CGN_FPIN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:ALARM\n");
+		break;
+	case LPFC_CGN_FPIN_BOTH:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN|ALARM\n");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:NONE\n");
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	switch (phba->cgn_reg_signal) {
+	case EDC_CG_SIG_WARN_ONLY:
+		scnprintf(tmp, sizeof(tmp),
+			  "          Current:  Signal:WARN  ");
+		break;
+	case EDC_CG_SIG_WARN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "          Current:  Signal:WARN|ALARM  ");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "          Current:  Signal:NONE  ");
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	switch (phba->cgn_reg_fpin) {
+	case LPFC_CGN_FPIN_WARN:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	case LPFC_CGN_FPIN_ALARM:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:ALARM  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	case LPFC_CGN_FPIN_BOTH:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:WARN|ALARM  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp),
+			  "FPIN:NONE  ACQEcnt:%d\n", phba->cgn_acqe_cnt);
+		break;
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
+		switch (phba->cmf_active_mode) {
+		case LPFC_CFG_OFF:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
+			break;
+		case LPFC_CFG_MANAGED:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
+			break;
+		case LPFC_CFG_MONITOR:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
+			break;
+		default:
+			scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
+		}
+		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+			goto buffer_done;
+	}
+
+	switch (phba->cgn_p.cgn_param_mode) {
+	case LPFC_CFG_OFF:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Off  ");
+		break;
+	case LPFC_CFG_MANAGED:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed  ");
+		break;
+	case LPFC_CFG_MONITOR:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor  ");
+		break;
+	default:
+		scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown  ");
+	}
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	total = 0;
+	rcv = 0;
+	for_each_present_cpu(cpu) {
+		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
+		total += atomic64_read(&cgs->total_bytes);
+		rcv += atomic64_read(&cgs->rcv_bytes);
+	}
+
+	scnprintf(tmp, sizeof(tmp),
+		  "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
+		  atomic_read(&phba->cmf_busy),
+		  phba->cmf_active_info, rcv, total);
+	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+		goto buffer_done;
+
+	scnprintf(tmp, sizeof(tmp),
+		  "Port_speed:%d  Link_byte_cnt:%ld  "
+		  "Max_byte_per_interval:%ld\n",
+		  lpfc_sli_port_speed_get(phba),
+		  (unsigned long)phba->cmf_link_byte_count,
+		  (unsigned long)phba->cmf_max_bytes_per_interval);
+	strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+	len = strnlen(buf, PAGE_SIZE);
+
+	if (unlikely(len >= (PAGE_SIZE - 1))) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+				"6312 Catching potential buffer "
+				"overflow > PAGE_SIZE = %lu bytes\n",
+				PAGE_SIZE);
+		strscpy(buf + PAGE_SIZE - 1 -
+			strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
+			LPFC_INFO_MORE_STR,
+			strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
+			+ 1);
+	}
+	return len;
+}
+
 /**
  * lpfc_drvr_version_show - Return the Emulex driver string with version number
  * @dev: class unused variable.

@@ -168,7 +350,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	char *statep;
 	int i;
 	int len = 0;
-	char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
+	char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};

 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
 		len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");

@@ -512,9 +694,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				"6314 Catching potential buffer "
 				"overflow > PAGE_SIZE = %lu bytes\n",
 				PAGE_SIZE);
-		strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
-			LPFC_NVME_INFO_MORE_STR,
-			sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
+		strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+			LPFC_INFO_MORE_STR,
+			sizeof(LPFC_INFO_MORE_STR) + 1);
 	}

 	return len;

@@ -2248,11 +2430,6 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
 	return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
 }

-static inline bool lpfc_rangecheck(uint val, uint min, uint max)
-{
-	return val >= min && val <= max;
-}
-
 /**
  * lpfc_enable_bbcr_set: Sets an attribute value.
  * @phba: pointer the the adapter structure.

@@ -2641,6 +2818,7 @@ static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
 		   NULL);
+static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);

 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 #define WWN_SZ 8

@@ -4038,6 +4216,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 	const char *val_buf = buf;
 	int err;
 	uint32_t prev_val;
+	u8 sli_family, if_type;

 	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
 		nolip = 1;

@@ -4061,13 +4240,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 	/*
 	 * The 'topology' is not a configurable parameter if :
 	 *   - persistent topology enabled
-	 *   - G7/G6 with no private loop support
+	 *   - ASIC_GEN_NUM >= 0xC, with no private loop support
 	 */
-
+	sli_family = bf_get(lpfc_sli_intf_sli_family,
+			    &phba->sli4_hba.sli_intf);
+	if_type = bf_get(lpfc_sli_intf_if_type,
+			 &phba->sli4_hba.sli_intf);
 	if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
-	     (!phba->sli4_hba.pc_sli4_params.pls &&
-	      (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
-	       phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
+	    (!phba->sli4_hba.pc_sli4_params.pls &&
+	     (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
+	      if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
 	    val == 4) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 			"3114 Loop mode not supported\n");

@@ -5412,9 +5594,9 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,

 /*
 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
-# is [0,1]. Default value is 0.
+# is [0,1]. Default value is 1.
 */
-LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
 		   "Use ADISC on rediscovery to authenticate FCP devices");

 /*

@@ -6146,6 +6328,19 @@ LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
 */
 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");

+/* Signaling module parameters */
+int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+module_param(lpfc_fabric_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
+
+int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, int, 0444);
+MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
+
+int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
+module_param(lpfc_use_cgn_signal, int, 0444);
+MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
+
 /*
 * lpfc_enable_dpp: Enable DPP on G7
 * 0 = DPP on G7 disabled

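Worth noting about the hunk above: all three knobs are registered with 0444 permissions, i.e. read-only module parameters, so they can only be set at load time (for example, "modprobe lpfc lpfc_use_cgn_signal=0" to fall back to FPIN-only congestion handling); changing them afterwards requires unloading and reloading the driver.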
@@ -6320,6 +6515,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_enable_bbcr,
 	&dev_attr_lpfc_enable_dpp,
 	&dev_attr_lpfc_enable_mi,
+	&dev_attr_cmf_info,
 	&dev_attr_lpfc_max_vmid,
 	&dev_attr_lpfc_vmid_inactivity_timeout,
 	&dev_attr_lpfc_vmid_app_header,

@@ -6350,6 +6546,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
 	&dev_attr_lpfc_max_scsicmpl_time,
 	&dev_attr_lpfc_stat_data_ctrl,
 	&dev_attr_lpfc_static_vport,
+	&dev_attr_cmf_info,
 	NULL,
 };

@@ -6741,6 +6938,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 		case LPFC_LINK_SPEED_128GHZ:
 			fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
 			break;
+		case LPFC_LINK_SPEED_256GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
+			break;
 		default:
 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;

@@ -6908,6 +7108,9 @@ lpfc_get_stats(struct Scsi_Host *shost)
 	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	hs->error_frames = pmb->un.varRdLnk.crcCnt;

+	hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
+	hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
+
 	hs->link_failure_count -= lso->link_failure_count;
 	hs->loss_of_sync_count -= lso->loss_of_sync_count;
 	hs->loss_of_signal_count -= lso->loss_of_signal_count;

@@ -7019,6 +7222,12 @@ lpfc_reset_stats(struct Scsi_Host *shost)
 	else
 		lso->link_events = (phba->fc_eventTag >> 1);

+	atomic64_set(&phba->cgn_acqe_stat.warn, 0);
+	atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
+
+	memset(&shost_to_fc_host(shost)->fpin_stats, 0,
+	       sizeof(shost_to_fc_host(shost)->fpin_stats));
+
 	psli->stats_start = ktime_get_seconds();

 	mempool_free(pmboxq, phba->mbox_mem_pool);

@@ -7452,6 +7661,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
 	lpfc_enable_mi_init(phba, lpfc_enable_mi);

+	phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
+	phba->cmf_active_mode = LPFC_CFG_OFF;
+	if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
+	    lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
+		lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
+
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		/* NVME only supported on SLI4 */
 		phba->nvmet_support = 0;

@@ -5751,6 +5751,92 @@ lpfc_get_trunk_info(struct bsg_job *job)

 }

+static int
+lpfc_get_cgnbuf_info(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct get_cgnbuf_info_req *cgnbuf_req;
+	struct lpfc_cgn_info *cp;
+	uint8_t *cgn_buff;
+	int size, cinfosz;
+	int rc = 0;
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct get_cgnbuf_info_req)) {
+		rc = -ENOMEM;
+		goto job_exit;
+	}
+
+	if (!phba->sli4_hba.pc_sli4_params.cmf) {
+		rc = -ENOENT;
+		goto job_exit;
+	}
+
+	if (!phba->cgn_i || !phba->cgn_i->virt) {
+		rc = -ENOENT;
+		goto job_exit;
+	}
+
+	cp = phba->cgn_i->virt;
+	if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
+		rc = -EPERM;
+		goto job_exit;
+	}
+
+	cgnbuf_req = (struct get_cgnbuf_info_req *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	/* For reset or size == 0 */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
+		lpfc_init_congestion_stat(phba);
+		goto job_exit;
+	}
+
+	/* We don't want to include the CRC at the end */
+	cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
+
+	size = cgnbuf_req->read_size;
+	if (!size)
+		goto job_exit;
+
+	if (size < cinfosz) {
+		/* Just copy back what we can */
+		cinfosz = size;
+		rc = -E2BIG;
+	}
+
+	/* Allocate memory to read congestion info */
+	cgn_buff = vmalloc(cinfosz);
+	if (!cgn_buff) {
+		rc = -ENOMEM;
+		goto job_exit;
+	}
+
+	memcpy(cgn_buff, cp, cinfosz);
+
+	bsg_reply->reply_payload_rcv_len =
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    cgn_buff, cinfosz);
+
+	vfree(cgn_buff);
+
+job_exit:
+	bsg_reply->result = rc;
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+	else
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2724 GET CGNBUF error: %d\n", rc);
+	return rc;
+}
+
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
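For context, a hypothetical user-space filler for this vendor command, using only the lpfc_bsg.h definitions added in the hunks below (the transport-level bsg plumbing around it is omitted):

	struct get_cgnbuf_info_req req = {
		.command   = LPFC_BSG_VENDOR_GET_CGNBUF_INFO,
		.read_size = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t),
		.reset     = 0,	/* or LPFC_BSG_CGN_RESET_STAT to clear stats */
	};

Note the handler deliberately returns -E2BIG while still copying back a truncated buffer when read_size is smaller than the info block.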
@@ -5813,6 +5899,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
 	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
 		rc = lpfc_get_trunk_info(job);
 		break;
+	case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
+		rc = lpfc_get_cgnbuf_info(job);
+		break;
 	default:
 		rc = -EINVAL;
 		bsg_reply->reply_payload_rcv_len = 0;

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2010-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *

@@ -43,6 +43,7 @@
 #define LPFC_BSG_VENDOR_RAS_GET_CONFIG		18
 #define LPFC_BSG_VENDOR_RAS_SET_CONFIG		19
 #define LPFC_BSG_VENDOR_GET_TRUNK_INFO		20
+#define LPFC_BSG_VENDOR_GET_CGNBUF_INFO		21

 struct set_ct_event {
 	uint32_t command;

@@ -386,6 +387,13 @@ struct get_trunk_info_req {
 	uint32_t command;
 };

+struct get_cgnbuf_info_req {
+	uint32_t command;
+	uint32_t read_size;
+	uint32_t reset;
+#define LPFC_BSG_CGN_RESET_STAT		1
+};
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED		0
 #define SLI_CONFIG_HANDLED		1

@@ -58,6 +58,8 @@ void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
 			   uint16_t, uint16_t, bool);
 int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_congestion_buf(struct lpfc_hba *phba);
+int lpfc_unreg_congestion_buf(struct lpfc_hba *phba);
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
 void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);

@@ -74,6 +76,20 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
 void lpfc_free_iocb_list(struct lpfc_hba *phba);
 int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 			struct lpfc_queue *drq, int count, int idx);
+uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
+void lpfc_cmf_signal_init(struct lpfc_hba *phba);
+void lpfc_cmf_start(struct lpfc_hba *phba);
+void lpfc_cmf_stop(struct lpfc_hba *phba);
+void lpfc_init_congestion_stat(struct lpfc_hba *phba);
+void lpfc_init_congestion_buf(struct lpfc_hba *phba);
+int lpfc_sli4_cgn_params_read(struct lpfc_hba *phba);
+uint32_t lpfc_cgn_calc_crc32(void *bufp, uint32_t sz, uint32_t seed);
+int lpfc_config_cgn_signal(struct lpfc_hba *phba);
+int lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total);
+void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
+void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
+void lpfc_unblock_requests(struct lpfc_hba *phba);
+void lpfc_block_requests(struct lpfc_hba *phba);

 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);

@@ -87,6 +103,8 @@ void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
+void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp);
 void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_set_disctmo(struct lpfc_vport *);
 int lpfc_can_disctmo(struct lpfc_vport *);

@@ -141,6 +159,8 @@ int lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry);
 int lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry);
 int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry);
+int lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry);
+void lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,

@@ -213,6 +233,9 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_intr_handler(int, void *);
 irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);

+int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
+		     uint32_t len);
+
 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
 int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
 void lpfc_sli4_poll_hbtimer(struct timer_list *t);

@@ -459,6 +482,9 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
 void lpfc_create_static_vport(struct lpfc_hba *);
 void lpfc_stop_hba_timers(struct lpfc_hba *);
 void lpfc_stop_port(struct lpfc_hba *);
+int lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t sz);
+int lpfc_update_cmf_cmpl(struct lpfc_hba *phba, uint64_t val, uint32_t sz,
+			 struct Scsi_Host *shost);
 void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
 void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
 void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);

@@ -605,6 +631,10 @@ extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;
 extern unsigned long lpfc_no_hba_reset[];
+extern int lpfc_acqe_cgn_frequency;
+extern int lpfc_fabric_cgn_frequency;
+extern int lpfc_use_cgn_signal;
+
 extern union lpfc_wqe128 lpfc_iread_cmd_template;
 extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
 extern union lpfc_wqe128 lpfc_icmnd_cmd_template;

@@ -2288,6 +2288,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		/* No retry on Vendor, RPA only done on physical port */
 		if (phba->link_flag & LS_CT_VEN_RPA) {
 			phba->link_flag &= ~LS_CT_VEN_RPA;
+			if (phba->cmf_active_mode == LPFC_CFG_OFF)
+				return;
 			lpfc_printf_log(phba, KERN_ERR,
 					LOG_DISCOVERY | LOG_ELS,
 					"6460 VEN FDMI RPA failure\n");

@@ -2332,24 +2334,29 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		break;
 	case SLI_MGMT_RPA:
 		if (vport->port_type == LPFC_PHYSICAL_PORT &&
-		    phba->sli4_hba.pc_sli4_params.mi_ver > LPFC_MIB1_SUPPORT) {
+		    phba->cfg_enable_mi &&
+		    phba->sli4_hba.pc_sli4_params.mi_ver) {
 			/* mi is only for the phyical port, no vports */
 			if (phba->link_flag & LS_CT_VEN_RPA) {
 				lpfc_printf_vlog(vport, KERN_INFO,
-						 LOG_DISCOVERY | LOG_ELS,
+						 LOG_DISCOVERY | LOG_ELS |
+						 LOG_CGN_MGMT,
 						 "6449 VEN RPA FDMI Success\n");
 				phba->link_flag &= ~LS_CT_VEN_RPA;
 				break;
 			}

+			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+					"6210 Issue Vendor MI FDMI %x\n",
+					phba->sli4_hba.pc_sli4_params.mi_ver);
+
 			/* CGN is only for the physical port, no vports */
 			if (lpfc_fdmi_cmd(vport, ndlp, cmd,
 					  LPFC_FDMI_VENDOR_ATTR_mi) == 0)
 				phba->link_flag |= LS_CT_VEN_RPA;
 			lpfc_printf_log(phba, KERN_INFO,
 					LOG_DISCOVERY | LOG_ELS,
 					"6458 Send MI FDMI:%x Flag x%x\n",
-					phba->sli4_hba.pc_sli4_params.mi_value,
+					phba->sli4_hba.pc_sli4_params.mi_ver,
 					phba->link_flag);
 		} else {
 			lpfc_printf_log(phba, KERN_INFO,

@@ -2846,6 +2853,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,

 	ae->un.AttrInt = 0;
 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		if (phba->lmt & LMT_256Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
 		if (phba->lmt & LMT_128Gb)
 			ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
 		if (phba->lmt & LMT_64Gb)

@@ -2927,6 +2936,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
 	case LPFC_LINK_SPEED_128GHZ:
 		ae->un.AttrInt = HBA_PORTSPEED_128GFC;
 		break;
+	case LPFC_LINK_SPEED_256GHZ:
+		ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+		break;
 	default:
 		ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
 		break;

@@ -3343,7 +3355,7 @@ lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 256);
 	sprintf(mibrevision, "ELXE2EM:%04d",
-		phba->sli4_hba.pc_sli4_params.mi_value);
+		phba->sli4_hba.pc_sli4_params.mi_ver);
 	strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
 	len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
 	len += (len & 3) ? (4 - (len & 3)) : 4;

@@ -3884,9 +3896,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 /**
  * lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort
  * @vport: pointer to a host virtual N_Port data structure.
- * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
- * cmdcode: FDMI command to send
- * mask: Mask of HBA or PORT Attributes to send
+ * @cmdcode: application server command code to send
+ * @vmid: pointer to vmid info structure
 *
 * Builds and sends a FDMI command using the CT subsystem.
 */

@@ -5429,6 +5429,180 @@ lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }

+static int
+lpfc_cgn_buffer_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	debug->buffer = vmalloc(LPFC_CGN_BUF_SIZE);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
+		     loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *buffer = debug->buffer;
+	uint32_t *ptr;
+	int cnt, len = 0;
+
+	if (!phba->sli4_hba.pc_sli4_params.mi_ver || !phba->cgn_i) {
+		len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+				 "Congestion Mgmt is not supported\n");
+		goto out;
+	}
+	ptr = (uint32_t *)phba->cgn_i->virt;
+	len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+			 "Congestion Buffer Header\n");
+	/* Dump the first 32 bytes */
+	cnt = 32;
+	len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+			 "000: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
+			 *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7));
+	ptr += 8;
+	len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+			 "Congestion Buffer Data\n");
+	while (cnt < sizeof(struct lpfc_cgn_info)) {
+		if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+			len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+					 "Truncated . . .\n");
+			break;
+		}
+		len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+				 "%03x: %08x %08x %08x %08x "
+				 "%08x %08x %08x %08x\n",
+				 cnt, *ptr, *(ptr + 1), *(ptr + 2),
+				 *(ptr + 3), *(ptr + 4), *(ptr + 5),
+				 *(ptr + 6), *(ptr + 7));
+		cnt += 32;
+		ptr += 8;
+	}
+out:
+	return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+}
+
+static int
+lpfc_cgn_buffer_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	vfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+static int
+lpfc_rx_monitor_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_rx_monitor_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
+		     loff_t *ppos)
+{
+	struct lpfc_rx_monitor_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *buffer = debug->buffer;
+	struct rxtable_entry *entry;
+	int i, len = 0, head, tail, last, start;
+
+	head = atomic_read(&phba->rxtable_idx_head);
+	while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
+		/* Table is getting updated */
+		msleep(20);
+		head = atomic_read(&phba->rxtable_idx_head);
+	}
+
+	tail = atomic_xchg(&phba->rxtable_idx_tail, head);
+	if (!phba->rxtable || head == tail) {
+		len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+				 "Rxtable is empty\n");
+		goto out;
+	}
+	last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
+	start = tail;
+
+	len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+			 "        MaxBPI\t Total Data Cmd  Total Data Cmpl "
+			 "  Latency(us)  Avg IO Size\tMax IO Size   IO cnt "
+			 "Info BWutil(ms)\n");
+get_table:
+	for (i = start; i < last; i++) {
+		entry = &phba->rxtable[i];
+		len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+				 "%3d:%12lld %12lld\t%12lld\t"
+				 "%8lldus\t%8lld\t%10lld "
+				 "%8d %2d %2d(%2d)\n",
+				 i, entry->max_bytes_per_interval,
+				 entry->total_bytes,
+				 entry->rcv_bytes,
+				 entry->avg_io_latency,
+				 entry->avg_io_size,
+				 entry->max_read_cnt,
+				 entry->io_cnt,
+				 entry->cmf_info,
+				 entry->timer_utilization,
+				 entry->timer_interval);
+	}
+
+	if (head != last) {
+		start = 0;
+		last = head;
+		goto get_table;
+	}
+out:
+	return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+}
+
+static int
+lpfc_rx_monitor_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_rx_monitor_debug *debug = file->private_data;
+
+	vfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
 #undef lpfc_debugfs_op_disc_trc
 static const struct file_operations lpfc_debugfs_op_disc_trc = {
 	.owner =        THIS_MODULE,

@@ -5657,6 +5831,23 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
 	.write =        lpfc_idiag_extacc_write,
 	.release =      lpfc_idiag_cmd_release,
 };
+#undef lpfc_cgn_buffer_op
+static const struct file_operations lpfc_cgn_buffer_op = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_cgn_buffer_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_cgn_buffer_read,
+	.release =      lpfc_cgn_buffer_release,
+};
+
+#undef lpfc_rx_monitor_op
+static const struct file_operations lpfc_rx_monitor_op = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_rx_monitor_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_rx_monitor_read,
+	.release =      lpfc_rx_monitor_release,
+};
 #endif

 /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command

@@ -5907,6 +6098,32 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 			goto debug_failed;
 		}

+		/* Congestion Info Buffer */
+		scnprintf(name, sizeof(name), "cgn_buffer");
+		phba->debug_cgn_buffer =
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_cgn_buffer_op);
+		if (!phba->debug_cgn_buffer) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "6527 Cannot create debugfs "
+					 "cgn_buffer\n");
+			goto debug_failed;
+		}
+
+		/* RX Monitor */
+		scnprintf(name, sizeof(name), "rx_monitor");
+		phba->debug_rx_monitor =
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_rx_monitor_op);
+		if (!phba->debug_rx_monitor) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "6528 Cannot create debugfs "
+					 "rx_monitor\n");
+			goto debug_failed;
+		}
+
 		/* RAS log */
 		snprintf(name, sizeof(name), "ras_log");
 		phba->debug_ras_log =

@@ -6335,6 +6552,12 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 		debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
 		phba->debug_hbqinfo = NULL;

+		debugfs_remove(phba->debug_cgn_buffer);
+		phba->debug_cgn_buffer = NULL;
+
+		debugfs_remove(phba->debug_rx_monitor);
+		phba->debug_rx_monitor = NULL;
+
 		debugfs_remove(phba->debug_ras_log);
 		phba->debug_ras_log = NULL;

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *

@@ -52,6 +52,9 @@
 /* scsistat output buffer size */
 #define LPFC_SCSISTAT_SIZE 8192

+/* Congestion Info Buffer size */
+#define LPFC_CGN_BUF_SIZE 8192
+
 #define LPFC_DEBUG_OUT_LINE_SZ	80

 /*

@@ -279,6 +282,12 @@ struct lpfc_idiag {
 	void *ptr_private;
 };

+#define MAX_DEBUGFS_RX_TABLE_SIZE	(100 * LPFC_MAX_RXMONITOR_ENTRY)
+struct lpfc_rx_monitor_debug {
+	char *i_private;
+	char *buffer;
+};
+
 #else

 #define lpfc_nvmeio_data(phba, fmt, arg...) \

@@ -78,10 +78,11 @@ struct lpfc_node_rrqs {
 };

 enum lpfc_fc4_xpt_flags {
-	NLP_WAIT_FOR_UNREG = 0x1,
-	SCSI_XPT_REGD      = 0x2,
-	NVME_XPT_REGD      = 0x4,
-	NLP_XPT_HAS_HH     = 0x8,
+	NLP_XPT_REGD        = 0x1,
+	SCSI_XPT_REGD       = 0x2,
+	NVME_XPT_REGD       = 0x4,
+	NVME_XPT_UNREG_WAIT = 0x8,
+	NLP_XPT_HAS_HH      = 0x10
 };

 struct lpfc_nodelist {

[File diff suppressed because it is too large]

@@ -3331,6 +3331,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 	case LPFC_LINK_SPEED_32GHZ:
 	case LPFC_LINK_SPEED_64GHZ:
 	case LPFC_LINK_SPEED_128GHZ:
+	case LPFC_LINK_SPEED_256GHZ:
 		break;
 	default:
 		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;

@@ -3646,6 +3647,10 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 				 phba->wait_4_mlo_maint_flg);
 		}
 		lpfc_mbx_process_link_up(phba, la);
+
+		if (phba->cmf_active_mode != LPFC_CFG_OFF)
+			lpfc_cmf_signal_init(phba);
+
 	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
 		   attn_type == LPFC_ATT_UNEXP_WWPN) {
 		phba->fc_stat.LinkDown++;

@@ -4208,6 +4213,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct lpfc_vport *vport = pmb->vport;
+	int rc;

 	pmb->ctx_buf = NULL;
 	pmb->ctx_ndlp = NULL;

@@ -4283,9 +4289,23 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		/* Issue SCR just before NameServer GID_FT Query */
 		lpfc_issue_els_scr(vport, 0);

-		if (!phba->cfg_enable_mi ||
-		    phba->sli4_hba.pc_sli4_params.mi_ver < LPFC_MIB3_SUPPORT)
+		/* Link was bounced or a Fabric LOGO occurred. Start EDC
+		 * with initial FW values provided the congestion mode is
+		 * not off. Note that signals may or may not be supported
+		 * by the adapter but FPIN is provided by default for 1
+		 * or both missing signals support.
+		 */
+		if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+			phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+			phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+			rc = lpfc_issue_els_edc(vport, 0);
+			lpfc_printf_log(phba, KERN_INFO,
+					LOG_INIT | LOG_ELS | LOG_DISCOVERY,
+					"4220 EDC issue error x%x, Data: x%x\n",
+					rc, phba->cgn_init_reg_signal);
+		} else {
 			lpfc_issue_els_rdf(vport, 0);
+		}
 	}

 	vport->fc_ns_retry = 0;

@@ -4501,10 +4521,152 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
 	spin_unlock_irqrestore(shost->host_lock, iflags);
 }

+/* Register a node with backend if not already done */
+void
+lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&ndlp->lock, iflags);
+	if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+		/* Already registered with backend, trigger rescan */
+		spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+		if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
+		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
+			lpfc_nvme_rescan_port(vport, ndlp);
+		}
+		return;
+	}
+
+	ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
+	spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+	if (lpfc_valid_xpt_node(ndlp)) {
+		vport->phba->nport_event_cnt++;
+		/*
+		 * Tell the fc transport about the port, if we haven't
+		 * already. If we have, and it's a scsi entity, be
+		 */
+		lpfc_register_remote_port(vport, ndlp);
+	}
+
+	/* We are done if we do not have any NVME remote node */
+	if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
+		return;
+
+	/* Notify the NVME transport of this new rport. */
+	if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+	    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+		if (vport->phba->nvmet_support == 0) {
+			/* Register this rport with the transport.
+			 * Only NVME Target Rports are registered with
+			 * the transport.
+			 */
+			if (ndlp->nlp_type & NLP_NVME_TARGET) {
+				vport->phba->nport_event_cnt++;
+				lpfc_nvme_register_port(vport, ndlp);
+			}
+		} else {
+			/* Just take an NDLP ref count since the
+			 * target does not register rports.
+			 */
+			lpfc_nlp_get(ndlp);
+		}
+	}
+}
+
+/* Unregister a node with backend if not already done */
+void
+lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&ndlp->lock, iflags);
+	if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
+		spin_unlock_irqrestore(&ndlp->lock, iflags);
+		return;
+	}
+
+	ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+	spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+	if (ndlp->rport &&
+	    ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+		vport->phba->nport_event_cnt++;
+		lpfc_unregister_remote_port(ndlp);
+	}
+
+	if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
+		vport->phba->nport_event_cnt++;
+		if (vport->phba->nvmet_support == 0) {
+			/* Start devloss if target. */
+			if (ndlp->nlp_type & NLP_NVME_TARGET)
+				lpfc_nvme_unregister_port(vport, ndlp);
+		} else {
+			/* NVMET has no upcall. */
+			lpfc_nlp_put(ndlp);
+		}
+	}
+
+}
+
+/*
+ * Adisc state change handling
+ */
+static void
+lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			int new_state)
+{
+	switch (new_state) {
+	/*
+	 * Any state to ADISC_ISSUE
+	 * Do nothing, adisc cmpl handling will trigger state changes
+	 */
+	case NLP_STE_ADISC_ISSUE:
+		break;
+
+	/*
+	 * ADISC_ISSUE to mapped states
+	 * Trigger a registration with backend, it will be nop if
+	 * already registered
+	 */
+	case NLP_STE_UNMAPPED_NODE:
+		ndlp->nlp_type |= NLP_FC_NODE;
+		fallthrough;
+	case NLP_STE_MAPPED_NODE:
+		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+		lpfc_nlp_reg_node(vport, ndlp);
+		break;
+
+	/*
+	 * ADISC_ISSUE to non-mapped states
+	 * We are moving from ADISC_ISSUE to a non-mapped state because
+	 * ADISC failed, we would have skipped unregistering with
+	 * backend, attempt it now
+	 */
+	case NLP_STE_NPR_NODE:
+		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+		fallthrough;
+	default:
+		lpfc_nlp_unreg_node(vport, ndlp);
+		break;
+	}
+
+}
+
 static void
 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		       int old_state, int new_state)
 {
+	/* Trap ADISC changes here */
+	if (new_state == NLP_STE_ADISC_ISSUE ||
+	    old_state == NLP_STE_ADISC_ISSUE) {
+		lpfc_handle_adisc_state(vport, ndlp, new_state);
+		return;
+	}
+
 	if (new_state == NLP_STE_UNMAPPED_NODE) {
 		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
 		ndlp->nlp_type |= NLP_FC_NODE;

|
@ -4514,60 +4676,17 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
if (new_state == NLP_STE_NPR_NODE)
|
||||
ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
|
||||
|
||||
/* FCP and NVME Transport interface */
|
||||
/* Reg/Unreg for FCP and NVME Transport interface */
|
||||
if ((old_state == NLP_STE_MAPPED_NODE ||
|
||||
old_state == NLP_STE_UNMAPPED_NODE)) {
|
||||
if (ndlp->rport &&
|
||||
lpfc_valid_xpt_node(ndlp)) {
|
||||
vport->phba->nport_event_cnt++;
|
||||
lpfc_unregister_remote_port(ndlp);
|
||||
}
|
||||
|
||||
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
|
||||
vport->phba->nport_event_cnt++;
|
||||
if (vport->phba->nvmet_support == 0) {
|
||||
/* Start devloss if target. */
|
||||
if (ndlp->nlp_type & NLP_NVME_TARGET)
|
||||
lpfc_nvme_unregister_port(vport, ndlp);
|
||||
} else {
|
||||
/* NVMET has no upcall. */
|
||||
lpfc_nlp_put(ndlp);
|
||||
}
|
||||
}
|
||||
/* For nodes marked for ADISC, Handle unreg in ADISC cmpl */
|
||||
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
|
||||
lpfc_nlp_unreg_node(vport, ndlp);
|
||||
}
|
||||
|
||||
/* FCP and NVME Transport interfaces */
|
||||
|
||||
if (new_state == NLP_STE_MAPPED_NODE ||
|
||||
new_state == NLP_STE_UNMAPPED_NODE) {
|
||||
if (lpfc_valid_xpt_node(ndlp)) {
|
||||
vport->phba->nport_event_cnt++;
|
||||
/*
|
||||
* Tell the fc transport about the port, if we haven't
|
||||
* already. If we have, and it's a scsi entity, be
|
||||
*/
|
||||
lpfc_register_remote_port(vport, ndlp);
|
||||
}
|
||||
/* Notify the NVME transport of this new rport. */
|
||||
if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
|
||||
ndlp->nlp_fc4_type & NLP_FC4_NVME) {
|
||||
if (vport->phba->nvmet_support == 0) {
|
||||
/* Register this rport with the transport.
|
||||
* Only NVME Target Rports are registered with
|
||||
* the transport.
|
||||
*/
|
||||
if (ndlp->nlp_type & NLP_NVME_TARGET) {
|
||||
vport->phba->nport_event_cnt++;
|
||||
lpfc_nvme_register_port(vport, ndlp);
|
||||
}
|
||||
} else {
|
||||
/* Just take an NDLP ref count since the
|
||||
* target does not register rports.
|
||||
*/
|
||||
lpfc_nlp_get(ndlp);
|
||||
}
|
||||
}
|
||||
}
|
||||
new_state == NLP_STE_UNMAPPED_NODE)
|
||||
lpfc_nlp_reg_node(vport, ndlp);
|
||||
|
||||
if ((new_state == NLP_STE_MAPPED_NODE) &&
|
||||
(vport->stat_data_enabled)) {
|
||||
|
|
|

@@ -1,7 +1,7 @@
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
@@ -608,6 +608,7 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A000000
#define ELS_CMD_LCB 0x81000000
#define ELS_CMD_FPIN 0x16000000
#define ELS_CMD_EDC 0x17000000
#define ELS_CMD_QFPA 0xB0000000
#define ELS_CMD_UVEM 0xB1000000
#else /* __LITTLE_ENDIAN_BITFIELD */
@@ -652,6 +653,7 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A
#define ELS_CMD_LCB 0x81
#define ELS_CMD_FPIN ELS_FPIN
#define ELS_CMD_EDC ELS_EDC
#define ELS_CMD_QFPA 0xB0
#define ELS_CMD_UVEM 0xB1
#endif
@@ -1694,6 +1696,7 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
#define PCI_DEVICE_ID_LANCER_G6_FC 0xe300
#define PCI_DEVICE_ID_LANCER_G7_FC 0xf400
#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095

@@ -20,6 +20,7 @@
 * included with this package. *
 *******************************************************************/

#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

/* Macros to deal with bit fields. Each bit field must have 3 #defines
@@ -94,6 +95,9 @@ struct lpfc_sli_intf {
#define LPFC_SLI_INTF_FAMILY_BE3 0x1
#define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa
#define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb
#define LPFC_SLI_INTF_FAMILY_G6 0xc
#define LPFC_SLI_INTF_FAMILY_G7 0xd
#define LPFC_SLI_INTF_FAMILY_G7P 0xe
#define lpfc_sli_intf_slirev_SHIFT 4
#define lpfc_sli_intf_slirev_MASK 0x0000000F
#define lpfc_sli_intf_slirev_WORD word0
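
The SHIFT/MASK/WORD triplets above follow the header's bit-field convention (see the "Macros to deal with bit fields" comment): every field gets three #defines naming its shift, its mask, and the word that backs it. A minimal sketch of the set/get helpers this convention supports is below; lpfc layers equivalent bf_set()/bf_get() macros on top of these defines, but the struct and field here are made up for illustration.

/* Sketch: generic accessors over the SHIFT/MASK/WORD convention.
 * The example_ctx struct and ex_rev field are hypothetical.
 */
#include <stdint.h>

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
			      ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct example_ctx {
	uint32_t word0;
};
#define ex_rev_SHIFT 4		/* bits 4..7 of word0 */
#define ex_rev_MASK  0x0000000F
#define ex_rev_WORD  word0

/* usage:
 *	struct example_ctx c = { 0 };
 *	bf_set(ex_rev, &c, 0xd);
 *	uint32_t rev = bf_get(ex_rev, &c);
 */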
@@ -393,6 +397,12 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF
#define lpfc_wcqe_c_ersp0_WORD word0
	uint32_t total_data_placed;
#define lpfc_wcqe_c_cmf_cg_SHIFT 31
#define lpfc_wcqe_c_cmf_cg_MASK 0x00000001
#define lpfc_wcqe_c_cmf_cg_WORD total_data_placed
#define lpfc_wcqe_c_cmf_bw_SHIFT 0
#define lpfc_wcqe_c_cmf_bw_MASK 0x0FFFFFFF
#define lpfc_wcqe_c_cmf_bw_WORD total_data_placed
	uint32_t parameter;
#define lpfc_wcqe_c_bg_edir_SHIFT 5
#define lpfc_wcqe_c_bg_edir_MASK 0x00000001
@@ -687,6 +697,7 @@ struct lpfc_register {
#define lpfc_sliport_eqdelay_id_MASK 0xfff
#define lpfc_sliport_eqdelay_id_WORD word0
#define LPFC_SEC_TO_USEC 1000000
#define LPFC_SEC_TO_MSEC 1000

/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
 * reside in BAR 2.
@@ -959,6 +970,12 @@ union lpfc_sli4_cfg_shdr {
#define lpfc_mbox_hdr_add_status_SHIFT 8
#define lpfc_mbox_hdr_add_status_MASK 0x000000FF
#define lpfc_mbox_hdr_add_status_WORD word7
#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2
#define lpfc_mbox_hdr_add_status_2_SHIFT 16
#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF
#define lpfc_mbox_hdr_add_status_2_WORD word7
#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01
#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02
			uint32_t response_length;
			uint32_t actual_response_length;
		} response;
@@ -1015,6 +1032,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_SET_HOST_DATA 0x5D
#define LPFC_MBOX_OPCODE_SEND_ACTIVATION 0x73
#define LPFC_MBOX_OPCODE_RESET_LICENSES 0x74
#define LPFC_MBOX_OPCODE_REG_CONGESTION_BUF 0x8E
#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
@@ -1123,6 +1141,12 @@ struct lpfc_mbx_sge {
	uint32_t length;
};

struct lpfc_mbx_host_buf {
	uint32_t length;
	uint32_t pa_lo;
	uint32_t pa_hi;
};

struct lpfc_mbx_nembed_cmd {
	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19
@@ -1133,6 +1157,31 @@ struct lpfc_mbx_nembed_sge_virt {
	void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
};

#define LPFC_MBX_OBJECT_NAME_LEN_DW 26
struct lpfc_mbx_read_object {	/* Version 0 */
	struct mbox_header header;
	union {
		struct {
			uint32_t word0;
#define lpfc_mbx_rd_object_rlen_SHIFT 0
#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF
#define lpfc_mbx_rd_object_rlen_WORD word0
			uint32_t rd_object_offset;
			uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
#define LPFC_OBJ_NAME_SZ 104	/* 26 x sizeof(uint32_t) is 104. */
			uint32_t rd_object_cnt;
			struct lpfc_mbx_host_buf rd_object_hbuf[4];
		} request;
		struct {
			uint32_t rd_object_actual_rlen;
			uint32_t word1;
#define lpfc_mbx_rd_object_eof_SHIFT 31
#define lpfc_mbx_rd_object_eof_MASK 0x1
#define lpfc_mbx_rd_object_eof_WORD word1
		} response;
	} u;
};

struct lpfc_mbx_eq_create {
	struct mbox_header header;
	union {
@@ -1555,7 +1604,7 @@ struct rq_context {
#define lpfc_rq_context_hdr_size_WORD word1
	uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
#define lpfc_rq_context_cq_id_MASK 0x000003FF
#define lpfc_rq_context_cq_id_MASK 0x0000FFFF
#define lpfc_rq_context_cq_id_WORD word2
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
@@ -2328,6 +2377,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
#define ADD_STATUS_INVALID_OBJECT_NAME 0xA0
#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58

struct lpfc_mbx_sli4_config {
@@ -2803,6 +2853,12 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
#define lpfc_mbx_rd_conf_wcs_SHIFT 28	/* warning signaling */
#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001
#define lpfc_mbx_rd_conf_wcs_WORD word1
#define lpfc_mbx_rd_conf_acs_SHIFT 27	/* alarm signaling */
#define lpfc_mbx_rd_conf_acs_MASK 0x00000001
#define lpfc_mbx_rd_conf_acs_WORD word1
	uint32_t word2;
#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0
#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F
@@ -3328,17 +3384,20 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19

#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
#define cfg_pvl_SHIFT 13
#define cfg_pvl_MASK 0x00000001
#define cfg_pvl_WORD word19

#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
#define cfg_nsler_WORD word19
#define cfg_pvl_SHIFT 13
#define cfg_pvl_MASK 0x00000001
#define cfg_pvl_WORD word19

#define cfg_pbde_SHIFT 20
#define cfg_pbde_MASK 0x00000001
#define cfg_pbde_WORD word19

	uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
@@ -3346,12 +3405,13 @@ struct lpfc_sli4_parameters {
#define cfg_max_tow_xri_WORD word20

	uint32_t word21;
#define cfg_mib_bde_cnt_SHIFT 16
#define cfg_mib_bde_cnt_MASK 0x000000ff
#define cfg_mib_bde_cnt_WORD word21
#define cfg_mi_ver_SHIFT 0
#define cfg_mi_ver_MASK 0x0000ffff
#define cfg_mi_ver_WORD word21
#define cfg_cmf_SHIFT 24
#define cfg_cmf_MASK 0x000000ff
#define cfg_cmf_WORD word21

	uint32_t mib_size;
	uint32_t word23;	/* RESERVED */
@@ -3380,7 +3440,10 @@ struct lpfc_sli4_parameters {

#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x12
#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_DUAL_DUMP 0x1e
#define LPFC_SET_ENABLE_MI 0x21
#define LPFC_SET_ENABLE_CMF 0x24
struct lpfc_mbx_set_feature {
	struct mbox_header header;
	uint32_t feature;
@@ -3395,6 +3458,9 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
#define lpfc_mbx_set_feature_CGN_warn_freq_SHIFT 0
#define lpfc_mbx_set_feature_CGN_warn_freq_MASK 0x0000ffff
#define lpfc_mbx_set_feature_CGN_warn_freq_WORD word6
#define lpfc_mbx_set_feature_dd_SHIFT 0
#define lpfc_mbx_set_feature_dd_MASK 0x00000001
#define lpfc_mbx_set_feature_dd_WORD word6
@@ -3404,6 +3470,15 @@ struct lpfc_mbx_set_feature {
#define LPFC_DISABLE_DUAL_DUMP 0
#define LPFC_ENABLE_DUAL_DUMP 1
#define LPFC_QUERY_OP_DUAL_DUMP 2
#define lpfc_mbx_set_feature_cmf_SHIFT 0
#define lpfc_mbx_set_feature_cmf_MASK 0x00000001
#define lpfc_mbx_set_feature_cmf_WORD word6
#define lpfc_mbx_set_feature_mi_SHIFT 0
#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff
#define lpfc_mbx_set_feature_mi_WORD word6
#define lpfc_mbx_set_feature_milunq_SHIFT 16
#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff
#define lpfc_mbx_set_feature_milunq_WORD word6
	uint32_t word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
@@ -3411,16 +3486,51 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_UESR_SHIFT 16
#define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UESR_WORD word7
#define lpfc_mbx_set_feature_CGN_alarm_freq_SHIFT 0
#define lpfc_mbx_set_feature_CGN_alarm_freq_MASK 0x0000ffff
#define lpfc_mbx_set_feature_CGN_alarm_freq_WORD word7
	u32 word8;
#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff
#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8
};


#define LPFC_SET_HOST_OS_DRIVER_VERSION 0x2
#define LPFC_SET_HOST_DATE_TIME 0x4

struct lpfc_mbx_set_host_date_time {
	uint32_t word6;
#define lpfc_mbx_set_host_month_WORD word6
#define lpfc_mbx_set_host_month_SHIFT 16
#define lpfc_mbx_set_host_month_MASK 0xFF
#define lpfc_mbx_set_host_day_WORD word6
#define lpfc_mbx_set_host_day_SHIFT 8
#define lpfc_mbx_set_host_day_MASK 0xFF
#define lpfc_mbx_set_host_year_WORD word6
#define lpfc_mbx_set_host_year_SHIFT 0
#define lpfc_mbx_set_host_year_MASK 0xFF
	uint32_t word7;
#define lpfc_mbx_set_host_hour_WORD word7
#define lpfc_mbx_set_host_hour_SHIFT 16
#define lpfc_mbx_set_host_hour_MASK 0xFF
#define lpfc_mbx_set_host_min_WORD word7
#define lpfc_mbx_set_host_min_SHIFT 8
#define lpfc_mbx_set_host_min_MASK 0xFF
#define lpfc_mbx_set_host_sec_WORD word7
#define lpfc_mbx_set_host_sec_SHIFT 0
#define lpfc_mbx_set_host_sec_MASK 0xFF
};
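
For the SET_HOST_DATE_TIME variant of SET_HOST_DATA, word6 and word7 above pack a full timestamp one field per byte lane. A hedged sketch of filling the structure with the bf_set helper follows; the two-digit year (suggested by the 0xFF mask) and the surrounding mailbox plumbing are assumptions, not the driver's code.

/* Sketch (assumed usage, not lpfc source): pack a broken-down time
 * into lpfc_mbx_set_host_date_time with the bf_set helper.
 */
static void example_fill_host_date_time(struct lpfc_mbx_set_host_date_time *dt,
					struct tm *broken)
{
	/* two-digit year is an assumption based on the 0xFF field mask */
	bf_set(lpfc_mbx_set_host_year, dt, (broken->tm_year + 1900) % 100);
	bf_set(lpfc_mbx_set_host_month, dt, broken->tm_mon + 1);
	bf_set(lpfc_mbx_set_host_day, dt, broken->tm_mday);
	bf_set(lpfc_mbx_set_host_hour, dt, broken->tm_hour);
	bf_set(lpfc_mbx_set_host_min, dt, broken->tm_min);
	bf_set(lpfc_mbx_set_host_sec, dt, broken->tm_sec);
}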
struct lpfc_mbx_set_host_data {
#define LPFC_HOST_OS_DRIVER_VERSION_SIZE 48
	struct mbox_header header;
	uint32_t param_id;
	uint32_t param_len;
	uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
	union {
		uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
		struct lpfc_mbx_set_host_date_time tm;
	} un;
};

struct lpfc_mbx_set_trunk_mode {
@@ -3438,6 +3548,21 @@ struct lpfc_mbx_get_sli4_parameters {
	struct lpfc_sli4_parameters sli4_parameters;
};

struct lpfc_mbx_reg_congestion_buf {
	struct mbox_header header;
	uint32_t word0;
#define lpfc_mbx_reg_cgn_buf_type_WORD word0
#define lpfc_mbx_reg_cgn_buf_type_SHIFT 0
#define lpfc_mbx_reg_cgn_buf_type_MASK 0xFF
#define lpfc_mbx_reg_cgn_buf_cnt_WORD word0
#define lpfc_mbx_reg_cgn_buf_cnt_SHIFT 16
#define lpfc_mbx_reg_cgn_buf_cnt_MASK 0xFF
	uint32_t word1;
	uint32_t length;
	uint32_t addr_lo;
	uint32_t addr_hi;
};

struct lpfc_rscr_desc_generic {
#define LPFC_RSRC_DESC_WSIZE 22
	uint32_t desc[LPFC_RSRC_DESC_WSIZE];
@@ -3603,6 +3728,9 @@ struct lpfc_controller_attribute {
#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
#define lpfc_cntl_attr_flash_id_SHIFT 16
#define lpfc_cntl_attr_flash_id_MASK 0x000000ff
#define lpfc_cntl_attr_flash_id_WORD word17
	uint32_t mbx_da_struct_ver;
	uint32_t ep_fw_da_struct_ver;
	uint32_t ncsi_ver_str[3];
@@ -3744,6 +3872,7 @@ struct lpfc_mbx_get_port_name {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5


#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
struct lpfc_mbx_wr_object {
	struct mbox_header header;
@@ -3760,7 +3889,7 @@ struct lpfc_mbx_wr_object {
#define lpfc_wr_object_write_length_MASK 0x00FFFFFF
#define lpfc_wr_object_write_length_WORD word4
			uint32_t write_offset;
			uint32_t object_name[26];
			uint32_t object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
			uint32_t bde_count;
			struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
		} request;
@@ -3809,6 +3938,7 @@ struct lpfc_mqe {
		struct lpfc_mbx_unreg_fcfi unreg_fcfi;
		struct lpfc_mbx_mq_create mq_create;
		struct lpfc_mbx_mq_create_ext mq_create_ext;
		struct lpfc_mbx_read_object read_object;
		struct lpfc_mbx_eq_create eq_create;
		struct lpfc_mbx_modify_eq_delay eq_delay;
		struct lpfc_mbx_cq_create cq_create;
@@ -3834,6 +3964,7 @@ struct lpfc_mqe {
		struct lpfc_mbx_query_fw_config query_fw_cfg;
		struct lpfc_mbx_set_beacon_config beacon_config;
		struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
		struct lpfc_mbx_reg_congestion_buf reg_congestion_buf;
		struct lpfc_mbx_set_link_diag_state link_diag_state;
		struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
		struct lpfc_mbx_run_link_diag_test link_diag_test;
@@ -3888,6 +4019,7 @@ struct lpfc_mcqe {
#define LPFC_TRAILER_CODE_GRP5 0x5
#define LPFC_TRAILER_CODE_FC 0x10
#define LPFC_TRAILER_CODE_SLI 0x11
#define LPFC_TRAILER_CODE_CMSTAT 0x13
};

struct lpfc_acqe_link {
@@ -4122,6 +4254,19 @@ struct lpfc_acqe_misconfigured_event {
#define LPFC_SLI_EVENT_STATUS_UNCERTIFIED 0x05
};

struct lpfc_acqe_cgn_signal {
	u32 word0;
#define lpfc_warn_acqe_SHIFT 0
#define lpfc_warn_acqe_MASK 0x7FFFFFFF
#define lpfc_warn_acqe_WORD word0
#define lpfc_imm_acqe_SHIFT 31
#define lpfc_imm_acqe_MASK 0x1
#define lpfc_imm_acqe_WORD word0
	u32 alarm_cnt;
	u32 word2;
	u32 trailer;
};

struct lpfc_acqe_sli {
	uint32_t event_data1;
	uint32_t event_data2;
@@ -4134,8 +4279,10 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
#define LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG 0xE
#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
};

/*
@@ -4543,6 +4690,69 @@ struct create_xri_wqe {
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1

struct cmf_sync_wqe {
	uint32_t rsrvd[3];
	uint32_t word3;
#define cmf_sync_interval_SHIFT 0
#define cmf_sync_interval_MASK 0x00000ffff
#define cmf_sync_interval_WORD word3
#define cmf_sync_afpin_SHIFT 16
#define cmf_sync_afpin_MASK 0x000000001
#define cmf_sync_afpin_WORD word3
#define cmf_sync_asig_SHIFT 17
#define cmf_sync_asig_MASK 0x000000001
#define cmf_sync_asig_WORD word3
#define cmf_sync_op_SHIFT 20
#define cmf_sync_op_MASK 0x00000000f
#define cmf_sync_op_WORD word3
#define cmf_sync_ver_SHIFT 24
#define cmf_sync_ver_MASK 0x0000000ff
#define cmf_sync_ver_WORD word3
#define LPFC_CMF_SYNC_VER 1
	uint32_t event_tag;
	uint32_t word5;
#define cmf_sync_wsigmax_SHIFT 0
#define cmf_sync_wsigmax_MASK 0x00000ffff
#define cmf_sync_wsigmax_WORD word5
#define cmf_sync_wsigcnt_SHIFT 16
#define cmf_sync_wsigcnt_MASK 0x00000ffff
#define cmf_sync_wsigcnt_WORD word5
	uint32_t word6;
	uint32_t word7;
#define cmf_sync_cmnd_SHIFT 8
#define cmf_sync_cmnd_MASK 0x0000000ff
#define cmf_sync_cmnd_WORD word7
	uint32_t word8;
	uint32_t word9;
#define cmf_sync_reqtag_SHIFT 0
#define cmf_sync_reqtag_MASK 0x00000ffff
#define cmf_sync_reqtag_WORD word9
#define cmf_sync_wfpinmax_SHIFT 16
#define cmf_sync_wfpinmax_MASK 0x0000000ff
#define cmf_sync_wfpinmax_WORD word9
#define cmf_sync_wfpincnt_SHIFT 24
#define cmf_sync_wfpincnt_MASK 0x0000000ff
#define cmf_sync_wfpincnt_WORD word9
	uint32_t word10;
#define cmf_sync_qosd_SHIFT 9
#define cmf_sync_qosd_MASK 0x00000001
#define cmf_sync_qosd_WORD word10
	uint32_t word11;
#define cmf_sync_cmd_type_SHIFT 0
#define cmf_sync_cmd_type_MASK 0x0000000f
#define cmf_sync_cmd_type_WORD word11
#define cmf_sync_wqec_SHIFT 7
#define cmf_sync_wqec_MASK 0x00000001
#define cmf_sync_wqec_WORD word11
#define cmf_sync_cqid_SHIFT 16
#define cmf_sync_cqid_MASK 0x0000ffff
#define cmf_sync_cqid_WORD word11
	uint32_t read_bytes;
	uint32_t word13;
	uint32_t word14;
	uint32_t word15;
};

struct abort_cmd_wqe {
	uint32_t rsrvd[3];
	uint32_t word3;
@@ -4672,6 +4882,7 @@ union lpfc_wqe {
	struct fcp_iread64_wqe fcp_iread;
	struct fcp_iwrite64_wqe fcp_iwrite;
	struct abort_cmd_wqe abort_cmd;
	struct cmf_sync_wqe cmf_sync;
	struct create_xri_wqe create_xri;
	struct xmit_bcast64_wqe xmit_bcast64;
	struct xmit_seq64_wqe xmit_sequence;
@@ -4692,6 +4903,7 @@ union lpfc_wqe128 {
	struct fcp_iread64_wqe fcp_iread;
	struct fcp_iwrite64_wqe fcp_iwrite;
	struct abort_cmd_wqe abort_cmd;
	struct cmf_sync_wqe cmf_sync;
	struct create_xri_wqe create_xri;
	struct xmit_bcast64_wqe xmit_bcast64;
	struct xmit_seq64_wqe xmit_sequence;
@@ -4707,6 +4919,7 @@ union lpfc_wqe128 {

#define MAGIC_NUMBER_G6 0xFEAA0003
#define MAGIC_NUMBER_G7 0xFEAA0005
#define MAGIC_NUMBER_G7P 0xFEAA0020

struct lpfc_grp_hdr {
	uint32_t size;
@@ -4734,6 +4947,7 @@ struct lpfc_grp_hdr {
#define FCP_COMMAND_TRSP 0x3
#define FCP_COMMAND_TSEND 0x7
#define OTHER_COMMAND 0x8
#define CMF_SYNC_COMMAND 0xA
#define ELS_COMMAND_NON_FIP 0xC
#define ELS_COMMAND_FIP 0xD

@@ -4755,6 +4969,7 @@ struct lpfc_grp_hdr {
#define CMD_FCP_TRECEIVE64_WQE 0xA1
#define CMD_FCP_TRSP64_WQE 0xA3
#define CMD_GEN_REQUEST64_WQE 0xC2
#define CMD_CMF_SYNC_WQE 0xE8

#define CMD_WQE_MASK 0xff

@@ -4762,3 +4977,43 @@ struct lpfc_grp_hdr {
#define LPFC_FW_DUMP 1
#define LPFC_FW_RESET 2
#define LPFC_DV_RESET 3

/* On some kernels, enum fc_ls_tlv_dtag does not have
 * these 2 enums defined, on other kernels it does.
 * To get aound this we need to add these 2 defines here.
 */
#ifndef ELS_DTAG_LNK_FAULT_CAP
#define ELS_DTAG_LNK_FAULT_CAP 0x0001000D
#endif
#ifndef ELS_DTAG_CG_SIGNAL_CAP
#define ELS_DTAG_CG_SIGNAL_CAP 0x0001000F
#endif

/*
 * Initializer useful for decoding FPIN string table.
 */
#define FC_FPIN_CONGN_SEVERITY_INIT { \
	{ FPIN_CONGN_SEVERITY_WARNING, "Warning" }, \
	{ FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \
}
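
FC_FPIN_CONGN_SEVERITY_INIT is meant to instantiate a small value-to-string table. A sketch of the typical consumption pattern follows; the entry type and lookup helper are illustrative, not lpfc's.

/* Sketch: decode an FPIN congestion severity through the table above. */
struct example_fpin_severity {
	u32 value;
	const char *name;
};

static const struct example_fpin_severity example_severity_tbl[] =
	FC_FPIN_CONGN_SEVERITY_INIT;

static const char *example_fpin_severity_str(u32 severity)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_severity_tbl); i++)
		if (example_severity_tbl[i].value == severity)
			return example_severity_tbl[i].name;
	return "Unknown";
}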
/* EDC supports two descriptors. When allocated, it is the
 * size of this structure plus each supported descriptor.
 */
struct lpfc_els_edc_req {
	struct fc_els_edc edc;			/* hdr up to descriptors */
	struct fc_diag_cg_sig_desc cgn_desc;	/* 1st descriptor */
};

/* Minimum structure defines for the EDC response.
 * Balance is in buffer.
 */
struct lpfc_els_edc_rsp {
	struct fc_els_edc_resp edc_rsp;		/* hdr up to descriptors */
	struct fc_diag_cg_sig_desc cgn_desc;	/* 1st descriptor */
};

/* Used for logging FPIN messages */
#define LPFC_FPIN_WWPN_LINE_SZ 128
#define LPFC_FPIN_WWPN_LINE_CNT 6
#define LPFC_FPIN_WWPN_NUM_LINE 6

@@ -1,7 +1,7 @@
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
@@ -118,6 +118,8 @@ const struct pci_device_id lpfc_id_table[] = {
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,

File diff suppressed because it is too large

@@ -1,7 +1,7 @@
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
@@ -44,6 +44,9 @@
#define LOG_NVME_DISC 0x00200000	/* NVME Discovery/Connect events. */
#define LOG_NVME_ABTS 0x00400000	/* NVME ABTS events. */
#define LOG_NVME_IOERR 0x00800000	/* NVME IO Error events. */
#define LOG_RSVD1 0x01000000		/* Reserved */
#define LOG_RSVD2 0x02000000		/* Reserved */
#define LOG_CGN_MGMT 0x04000000		/* Congestion Mgmt events */
#define LOG_TRACE_EVENT 0x80000000	/* Dmp the DBG log on this err */
#define LOG_ALL_MSG 0x7fffffff		/* LOG all messages */

@@ -513,8 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba,
		break;
	}

	if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
	     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
	/* Topology handling for ASIC_GEN_NUM 0xC and later */
	if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
	     phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
	    !(phba->sli4_hba.pc_sli4_params.pls) &&
	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;

@@ -1,7 +1,7 @@
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
@@ -335,6 +335,19 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free Congestion Data buffer */
	if (phba->cgn_i) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(struct lpfc_cgn_info),
				  phba->cgn_i->virt, phba->cgn_i->phys);
		kfree(phba->cgn_i);
		phba->cgn_i = NULL;
	}

	/* Free RX table */
	kfree(phba->rxtable);
	phba->rxtable = NULL;

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;

@@ -736,9 +736,13 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 * is already in MAPPED or UNMAPPED state. Catch this
		 * condition and don't set the nlp_state again because
		 * it causes an unnecessary transport unregister/register.
		 *
		 * Nodes marked for ADISC will move MAPPED or UNMAPPED state
		 * after issuing ADISC
		 */
		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) {
			if (ndlp->nlp_state != NLP_STE_MAPPED_NODE)
			if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
			    !(ndlp->nlp_flag & NLP_NPR_ADISC))
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_MAPPED_NODE);
		}
@@ -863,6 +867,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
out:
	/* Unregister from backend, could have been skipped due to ADISC */
	lpfc_nlp_unreg_node(vport, ndlp);

	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

@@ -1677,9 +1684,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
		spin_unlock_irq(&ndlp->lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));

		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		lpfc_unreg_rpi(vport, ndlp);
@@ -2597,13 +2601,14 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      void *arg,
			      uint32_t evt)
{
	lpfc_disc_set_adisc(vport, ndlp);

	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
	spin_unlock_irq(&ndlp->lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}

@@ -2645,14 +2650,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(&ndlp->lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
		/*
		 * ADISC nodes will be handled in regular discovery path after
		 * receiving response from NS.
		 *
		 * For other nodes, Send PLOGI to trigger an implicit LOGO.
		 */
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
@@ -2685,12 +2689,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
		/*
		 * ADISC nodes will be handled in regular discovery path after
		 * receiving response from NS.
		 *
		 * For other nodes, Send PLOGI to trigger an implicit LOGO.
		 */
		if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);

@@ -216,8 +216,8 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
	/* The register rebind might have occurred before the delete
	 * downcall. Guard against this race.
	 */
	if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
		ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);
	if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
		ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

	spin_unlock_irq(&ndlp->lock);

@@ -931,6 +931,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;
	uint32_t lat;
	bool call_done = false;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif
@@ -1135,10 +1137,21 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		lpfc_ncmd->nvmeCmd = NULL;
		spin_unlock(&lpfc_ncmd->buf_lock);
		call_done = true;
		}
		spin_unlock(&lpfc_ncmd->buf_lock);

		/* Check if IO qualified for CMF */
		if (phba->cmf_active_mode != LPFC_CFG_OFF &&
		    nCmd->io_dir == NVMEFC_FCP_READ &&
		    nCmd->payload_length) {
			/* Used when calculating average latency */
			lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
			lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
		}

		if (call_done)
			nCmd->done(nCmd);
	} else
		spin_unlock(&lpfc_ncmd->buf_lock);

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
@@ -1212,6 +1225,10 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			/* For a CMF Managed port, iod must be zero'ed */
			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
			cstat->input_requests++;
		}
	} else {
@@ -1562,6 +1579,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
		expedite = 1;
	}

	/* Check if IO qualifies for CMF */
	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
	    pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
	    pnvme_fcreq->payload_length) {
		ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
		if (ret) {
			ret = -EBUSY;
			goto out_fail;
		}
		/* Get start time for IO latency */
		start = ktime_get_ns();
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
@@ -1576,7 +1606,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
				 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
			goto out_fail1;
		}
	}

@@ -1596,7 +1626,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			 "idx %d DID %x\n",
			 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail;
		goto out_fail1;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
@@ -1606,6 +1636,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif
	lpfc_ncmd->rx_cmd_start = start;

	/*
	 * Store the data needed by the driver to issue, abort, and complete
@@ -1687,6 +1718,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
	} else
		cstat->control_requests--;
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail1:
	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
			     pnvme_fcreq->payload_length, NULL);
out_fail:
	return ret;
}
@@ -2324,7 +2358,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
		ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
		ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
		spin_unlock_irq(&ndlp->lock);
		rport = remote_port->private;
@@ -2336,7 +2370,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
			 */
			spin_lock_irq(&ndlp->lock);
			ndlp->nrport = NULL;
			ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
			ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
			spin_unlock_irq(&ndlp->lock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;
@@ -2488,7 +2522,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
	 * The transport will update it.
	 */
	spin_lock_irq(&vport->phba->hbalock);
	ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
	ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
	spin_unlock_irq(&vport->phba->hbalock);

	/* Don't let the host nvme transport keep sending keep-alives

@@ -1,7 +1,7 @@
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
@@ -34,11 +34,8 @@
#define LPFC_NVME_FB_SHIFT 9
#define LPFC_NVME_MAX_FB (1 << 20)	/* 1M */

#define LPFC_MAX_NVME_INFO_TMP_LEN 100
#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"

#define lpfc_ndlp_get_nrport(ndlp) \
	((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \
#define lpfc_ndlp_get_nrport(ndlp) \
	((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\
	? NULL : ndlp->nrport)

struct lpfc_nvme_qhandle {

@@ -1797,19 +1797,22 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_lock(&ctxp->ctxlock);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
				       iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVME_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
			spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
			list_del_init(&ctxp->list);
			spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVME_XBUSY;
		spin_unlock(&ctxp->ctxlock);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
				       iflag);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		rrq_empty = list_empty(&phba->active_rrq_list);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);

@@ -96,30 +96,6 @@ static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
				    struct lpfc_vmid *vmid);

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#define LPFC_CHECK_PROTECT_GUARD 1
#define LPFC_CHECK_PROTECT_REF 2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
@@ -683,7 +659,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(cmnd->request);
		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
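
The hunk above is one instance of the tree-wide conversion called out in the merge description: scsi_cmnd::request is gone, and drivers reach the backing request through the scsi_cmd_to_rq() accessor instead. A reduced sketch of the pattern (the wrapper function is illustrative, not lpfc code):

/* Sketch: map a SCSI command to its blk-mq hardware queue index via
 * the request accessor.
 */
#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

static u16 example_cmd_to_hwq(struct scsi_cmnd *cmnd)
{
	u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));

	return blk_mq_unique_tag_to_hwq(tag);
}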
@@ -1046,13 +1022,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = t10_pi_ref_tag(sc->request);
	lba = scsi_prot_ref_tag(sc);
	if (lba == LPFC_INVALID_REFTAG)
		return 0;

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		blksize = scsi_prot_interval(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
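
The two replacements above are the shape of the whole protection-information cleanup in this file: the driver-local lpfc_cmd_blksize()/t10_pi_ref_tag(sc->request) pairs give way to the SCSI midlayer's PI helpers. A hedged sketch of reading those attributes off a command (the logging function is illustrative; the helpers are the new midlayer API):

/* Sketch: the SCSI midlayer PI helpers this file now uses. */
#include <linux/printk.h>
#include <scsi/scsi_cmnd.h>

static void example_log_pi(struct scsi_cmnd *sc)
{
	u32 reftag = scsi_prot_ref_tag(sc);		/* expected first ref tag */
	unsigned int interval = scsi_prot_interval(sc);	/* PI interval, bytes */
	unsigned int blocks = scsi_logical_block_count(sc);

	pr_info("reftag=0x%x interval=%u blocks=%u guard_check=%d\n",
		reftag, interval, blocks,
		!!(sc->prot_flags & SCSI_PROT_GUARD_CHECK));
}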
@@ -1441,7 +1417,7 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
@@ -1521,7 +1497,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
@@ -1629,7 +1605,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = t10_pi_ref_tag(sc->request);
	reftag = scsi_prot_ref_tag(sc);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

@@ -1668,12 +1644,12 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
@@ -1791,8 +1767,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = t10_pi_ref_tag(sc->request);
	blksize = scsi_prot_interval(sc);
	reftag = scsi_prot_ref_tag(sc);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

@@ -1832,12 +1808,12 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		bf_set(pde6_ce, pde6, checking);
	else
		bf_set(pde6_ce, pde6, 0);

	if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		bf_set(pde6_re, pde6, checking);
	else
		bf_set(pde6_re, pde6, 0);
@@ -2023,7 +1999,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = t10_pi_ref_tag(sc->request);
	reftag = scsi_prot_ref_tag(sc);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

@@ -2051,12 +2027,12 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2223,8 +2199,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = t10_pi_ref_tag(sc->request);
	blksize = scsi_prot_interval(sc);
	reftag = scsi_prot_ref_tag(sc);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

@@ -2281,9 +2257,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

	} else {
		bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
		/*
@@ -2300,7 +2275,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
	}


	if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
	else
		bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
@@ -2557,7 +2532,7 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
	 * DIF (trailer) attached to it. Must ajust FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
	fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;

	return fcpdl;
}
@@ -2811,14 +2786,14 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
	 * data length is a multiple of the blksize.
	 */
	sgde = scsi_sglist(cmd);
	blksize = lpfc_cmd_blksize(cmd);
	blksize = scsi_prot_interval(cmd);
	data_src = (uint8_t *)sg_virt(sgde);
	data_len = sgde->length;
	if ((data_len & (blksize - 1)) == 0)
		chk_guard = 1;

	src = (struct scsi_dif_tuple *)sg_virt(sgpe);
	start_ref_tag = t10_pi_ref_tag(cmd->request);
	start_ref_tag = scsi_prot_ref_tag(cmd);
	if (start_ref_tag == LPFC_INVALID_REFTAG)
		goto out;
	start_app_tag = src->app_tag;
@@ -2839,7 +2814,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
			/* First Guard Tag checking */
			if (chk_guard) {
				guard_tag = src->guard_tag;
				if (lpfc_cmd_guard_csum(cmd))
				if (cmd->prot_flags
				    & SCSI_PROT_IP_CHECKSUM)
					sum = lpfc_bg_csum(data_src,
							   blksize);
				else
@@ -2910,7 +2886,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				scsi_prot_ref_tag(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
@@ -2920,7 +2896,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				scsi_prot_ref_tag(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
@@ -2930,7 +2906,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				scsi_prot_ref_tag(cmd),
				app_tag, start_app_tag);
	}
}
@@ -2992,7 +2968,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_logical_block_count(cmd), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3007,7 +2983,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_logical_block_count(cmd), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3022,7 +2998,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_logical_block_count(cmd), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3066,9 +3042,9 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_logical_block_count(cmd), bgstat, bghm);

		/* Calcuate what type of error it was */
		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
	return ret;
@@ -3103,8 +3079,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				"9072 BLKGRD: Invalid BG Profile in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_prot_ref_tag(cmd),
				scsi_logical_block_count(cmd), bgstat, bghm);
		ret = (-1);
		goto out;
	}
@@ -3115,8 +3091,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_prot_ref_tag(cmd),
				scsi_logical_block_count(cmd), bgstat, bghm);
		ret = (-1);
		goto out;
	}
@@ -3131,8 +3107,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				"9055 BLKGRD: Guard Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_prot_ref_tag(cmd),
				scsi_logical_block_count(cmd), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -3146,8 +3122,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				"9056 BLKGRD: Ref Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_prot_ref_tag(cmd),
				scsi_logical_block_count(cmd), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -3161,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				"9061 BLKGRD: App Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_prot_ref_tag(cmd),
				scsi_logical_block_count(cmd), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -3205,10 +3181,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
				"9057 BLKGRD: Unknown error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
				scsi_prot_ref_tag(cmd),
				scsi_logical_block_count(cmd), bgstat, bghm);

		/* Calcuate what type of error it was */
		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
@@ -3853,6 +3829,143 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
			psb->pCmd->sc_data_direction);
}

/**
 * lpfc_unblock_requests - allow further commands to be queued.
 * @phba: pointer to phba object
 *
 * For single vport, just call scsi_unblock_requests on physical port.
 * For multiple vports, send scsi_unblock_requests for all the vports.
 */
void
lpfc_unblock_requests(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    !phba->sli4_hba.max_cfg_param.vpi_used) {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
		return;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_block_requests - prevent further commands from being queued.
 * @phba: pointer to phba object
 *
 * For single vport, just call scsi_block_requests on physical port.
 * For multiple vports, send scsi_block_requests for all the vports.
 */
void
lpfc_block_requests(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (atomic_read(&phba->cmf_stop_io))
		return;

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    !phba->sli4_hba.max_cfg_param.vpi_used) {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
		return;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
 * @phba: The HBA for which this call is being executed.
 * @time: The latency of the IO that completed (in ns)
 * @size: The size of the IO that completed
 * @shost: SCSI host the IO completed on (NULL for a NVME IO)
 *
 * The routine adjusts the various Burst and Bandwidth counters used in
 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
 * that means the IO was never issued to the HBA, so this routine is
 * just being called to cleanup the counter from a previous
 * lpfc_update_cmf_cmd call.
 */
int
lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
		     uint64_t time, uint32_t size, struct Scsi_Host *shost)
{
	struct lpfc_cgn_stat *cgs;

	if (time != LPFC_CGN_NOT_SENT) {
		/* lat is ns coming in, save latency in us */
		if (time < 1000)
			time = 1;
		else
			time = div_u64(time + 500, 1000); /* round it */

		cgs = this_cpu_ptr(phba->cmf_stat);
		atomic64_add(size, &cgs->rcv_bytes);
		atomic64_add(time, &cgs->rx_latency);
		atomic_inc(&cgs->rx_io_cnt);
	}
	return 0;
}

/**
 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
 * @phba: The HBA for which this call is being executed.
 * @size: The size of the IO that will be issued
 *
 * The routine adjusts the various Burst and Bandwidth counters used in
 * Congestion management and E2E.
 */
int
lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
{
	uint64_t total;
	struct lpfc_cgn_stat *cgs;
	int cpu;

	/* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
	if (phba->cmf_active_mode == LPFC_CFG_MANAGED) {
		total = 0;
		for_each_present_cpu(cpu) {
			cgs = per_cpu_ptr(phba->cmf_stat, cpu);
			total += atomic64_read(&cgs->total_bytes);
		}
		if (total >= phba->cmf_max_bytes_per_interval) {
			if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
				lpfc_block_requests(phba);
				phba->cmf_last_ts =
					lpfc_calc_cmf_latency(phba);
			}
			atomic_inc(&phba->cmf_busy);
			return -EBUSY;
		}
		if (size > atomic_read(&phba->rx_max_read_cnt))
			atomic_set(&phba->rx_max_read_cnt, size);
	}

	cgs = this_cpu_ptr(phba->cmf_stat);
	atomic64_add(size, &cgs->total_bytes);
	return 0;
}
/**
|
||||
* lpfc_handle_fcp_err - FCP response handler
|
||||
* @vport: The virtual port for which this call is being executed.
|
||||
|
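The two helpers above are meant to be called in pairs: lpfc_update_cmf_cmd() charges an IO's size against the current congestion-management interval at submission, and lpfc_update_cmf_cmpl() records latency at completion (or, with LPFC_CGN_NOT_SENT, merely backs out the charge for an IO that never reached the HBA). A minimal sketch of that pairing, with issue_io_to_hba() as a hypothetical stand-in for the real WQE submission path:

static int example_submit(struct lpfc_hba *phba, struct scsi_cmnd *cmd)
{
	u64 start = ktime_get_ns();

	/* Charge the read size against this interval's byte budget. */
	if (lpfc_update_cmf_cmd(phba, scsi_bufflen(cmd)))
		return SCSI_MLQUEUE_TARGET_BUSY;	/* budget exhausted */

	if (issue_io_to_hba(cmd)) {			/* hypothetical helper */
		/* IO never reached the HBA: back out the charge only. */
		lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
				     scsi_bufflen(cmd), NULL);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	/* On completion the driver then calls:
	 * lpfc_update_cmf_cmpl(phba, ktime_get_ns() - start,
	 *		        scsi_bufflen(cmd), shost);
	 */
	return 0;
}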
@@ -4063,6 +4176,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	u32 logit = LOG_FCP;
 	u32 status, idx;
 	unsigned long iflags = 0;
+	u32 lat;
 	u8 wait_xb_clr = 0;

 	/* Sanity check on return of outstanding command */
@@ -4351,10 +4465,21 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 		lpfc_io_ktime(phba, lpfc_cmd);
 	}
 #endif
+	if (likely(!wait_xb_clr))
+		lpfc_cmd->pCmd = NULL;
+	spin_unlock(&lpfc_cmd->buf_lock);
+
+	/* Check if IO qualified for CMF */
+	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+	    cmd->sc_data_direction == DMA_FROM_DEVICE &&
+	    (scsi_sg_count(cmd))) {
+		/* Used when calculating average latency */
+		lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
+		lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
+	}
+
+	if (wait_xb_clr)
+		goto out;
-	lpfc_cmd->pCmd = NULL;
-	spin_unlock(&lpfc_cmd->buf_lock);

 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
 	cmd->scsi_done(cmd);
@@ -4367,8 +4492,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
 	if (lpfc_cmd->waitq)
 		wake_up(lpfc_cmd->waitq);
-out:
 	spin_unlock(&lpfc_cmd->buf_lock);
+out:
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
 }

@@ -4775,6 +4900,11 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
 			fcp_cmnd->fcpCntl3 = READ_DATA;
 			if (hdwq)
 				hdwq->scsi_cstat.input_requests++;
+
+			/* For a CMF Managed port, iod must be zero'ed */
+			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+				       LPFC_WQE_IOD_NONE);
 		}
 	} else {
 		/* From the icmnd template, initialize words 4 - 11 */
@@ -5029,12 +5159,8 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
 	}

 	/* Check for valid Emulex Device ID */
-	switch (ptr->device) {
-	case PCI_DEVICE_ID_LANCER_FC:
-	case PCI_DEVICE_ID_LANCER_G6_FC:
-	case PCI_DEVICE_ID_LANCER_G7_FC:
-		break;
-	default:
+	if (phba->sli_rev != LPFC_SLI_REV4 ||
+	    phba->hba_flag & HBA_FCOE_MODE) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"8347 Incapable PCI reset device: "
 				"0x%04x\n", ptr->device);
@@ -5423,13 +5549,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
  */
 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
 {
-	char *uuid = NULL;
+	struct bio *bio = scsi_cmd_to_rq(cmd)->bio;

-	if (cmd->request) {
-		if (cmd->request->bio)
-			uuid = blkcg_get_fc_appid(cmd->request->bio);
-	}
-	return uuid;
+	return bio ? blkcg_get_fc_appid(bio) : NULL;
 }

 /**
@@ -5462,7 +5584,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	if (phba->ktime_on)
 		start = ktime_get_ns();
 #endif
-
+	start = ktime_get_ns();
 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

 	/* sanity check on references */
@@ -5493,7 +5615,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	 * transport is still transitioning.
 	 */
 	if (!ndlp)
-		goto out_tgt_busy;
+		goto out_tgt_busy1;
+
+	/* Check if IO qualifies for CMF */
+	if (phba->cmf_active_mode != LPFC_CFG_OFF &&
+	    cmnd->sc_data_direction == DMA_FROM_DEVICE &&
+	    (scsi_sg_count(cmnd))) {
+		/* Latency start time saved in rx_cmd_start later in routine */
+		err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
+		if (err)
+			goto out_tgt_busy1;
+	}

 	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
 		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
@@ -5521,7 +5654,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 				 ndlp->nlp_portname.u.wwn[5],
 				 ndlp->nlp_portname.u.wwn[6],
 				 ndlp->nlp_portname.u.wwn[7]);
-			goto out_tgt_busy;
+			goto out_tgt_busy2;
 		}
 	}

@@ -5534,6 +5667,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 				 "IO busied\n");
 		goto out_host_busy;
 	}
+	lpfc_cmd->rx_cmd_start = start;

 	/*
 	 * Store the midlayer's command structure for the completion phase
@@ -5557,8 +5691,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 					 "reftag x%x cnt %u pt %x\n",
 					 dif_op_str[scsi_get_prot_op(cmnd)],
 					 cmnd->cmnd[0],
-					 t10_pi_ref_tag(cmnd->request),
-					 blk_rq_sectors(cmnd->request),
+					 scsi_prot_ref_tag(cmnd),
+					 scsi_logical_block_count(cmnd),
 					 (cmnd->cmnd[1]>>5));
 		}
 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);

@@ -5569,8 +5703,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
 					 "x%x reftag x%x cnt %u pt %x\n",
 					 cmnd->cmnd[0],
-					 t10_pi_ref_tag(cmnd->request),
-					 blk_rq_sectors(cmnd->request),
+					 scsi_prot_ref_tag(cmnd),
+					 scsi_logical_block_count(cmnd),
 					 (cmnd->cmnd[1]>>5));
 		}
 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -5641,8 +5775,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 				   bf_get(wqe_tmo,
 					  &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
 				   lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
-				   (uint32_t)
-				   (cmnd->request->timeout / 1000));
+				   (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));

 		goto out_host_busy_free_buf;
 	}
@@ -5678,13 +5811,20 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 out_host_busy_release_buf:
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
+	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+			     shost);
 	return SCSI_MLQUEUE_HOST_BUSY;

-out_tgt_busy:
+out_tgt_busy2:
+	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+			     shost);
+out_tgt_busy1:
 	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command_release_buf:
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
+	lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
+			     shost);

 out_fail_command:
 	cmnd->scsi_done(cmnd);
@@ -6273,6 +6413,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_scsi_event_header scsi_event;
 	int status;
 	u32 logit = LOG_FCP;
+	u32 dev_loss_tmo = vport->cfg_devloss_tmo;
 	unsigned long flags;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

@@ -6314,39 +6455,44 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)

 	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
 					FCP_TARGET_RESET);
-	if (status != SUCCESS)
-		logit = LOG_TRACE_EVENT;
-	spin_lock_irqsave(&pnode->lock, flags);
-	if (status != SUCCESS &&
-	    (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) &&
-	    !pnode->logo_waitq) {
-		pnode->logo_waitq = &waitq;
-		pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-		pnode->nlp_flag |= NLP_ISSUE_LOGO;
-		pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
-		spin_unlock_irqrestore(&pnode->lock, flags);
-		lpfc_unreg_rpi(vport, pnode);
-		wait_event_timeout(waitq,
-				   (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)),
-				   msecs_to_jiffies(vport->cfg_devloss_tmo *
-						    1000));
-
-		if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-				"0725 SCSI layer TGTRST failed & LOGO TMO "
-				" (%d, %llu) return x%x\n", tgt_id,
-				lun_id, status);
-			spin_lock_irqsave(&pnode->lock, flags);
-			pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
-		} else {
-			spin_lock_irqsave(&pnode->lock, flags);
-		}
-		pnode->logo_waitq = NULL;
-		spin_unlock_irqrestore(&pnode->lock, flags);
-		status = SUCCESS;
-	} else {
-		status = FAILED;
-		spin_unlock_irqrestore(&pnode->lock, flags);
-	}
+	if (status != SUCCESS) {
+		logit = LOG_TRACE_EVENT;
+
+		/* Issue LOGO, if no LOGO is outstanding */
+		spin_lock_irqsave(&pnode->lock, flags);
+		if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) &&
+		    !pnode->logo_waitq) {
+			pnode->logo_waitq = &waitq;
+			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+			pnode->nlp_flag |= NLP_ISSUE_LOGO;
+			pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
+			spin_unlock_irqrestore(&pnode->lock, flags);
+			lpfc_unreg_rpi(vport, pnode);
+			wait_event_timeout(waitq,
+					   (!(pnode->upcall_flags &
+					      NLP_WAIT_FOR_LOGO)),
+					   msecs_to_jiffies(dev_loss_tmo *
+							    1000));
+
+			if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
+				lpfc_printf_vlog(vport, KERN_ERR, logit,
+						 "0725 SCSI layer TGTRST "
+						 "failed & LOGO TMO (%d, %llu) "
+						 "return x%x\n",
+						 tgt_id, lun_id, status);
+				spin_lock_irqsave(&pnode->lock, flags);
+				pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+			} else {
+				spin_lock_irqsave(&pnode->lock, flags);
+			}
+			pnode->logo_waitq = NULL;
+			spin_unlock_irqrestore(&pnode->lock, flags);
+			status = SUCCESS;
+		} else {
+			spin_unlock_irqrestore(&pnode->lock, flags);
+			status = FAILED;
+		}
+	}

 	lpfc_printf_vlog(vport, KERN_ERR, logit,
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
  * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *

@@ -142,6 +142,10 @@ struct lpfc_scsicmd_bkt {
 #define FC_PORTSPEED_128GBIT	0x2000
 #endif

+#ifndef FC_PORTSPEED_256GBIT
+#define FC_PORTSPEED_256GBIT	0x4000
+#endif
+
 #define TXRDY_PAYLOAD_LEN	12

 /* For sysfs/debugfs tmp string max len */
[File diff suppressed because it is too large]
@@ -107,6 +107,7 @@ struct lpfc_iocbq {
 #define LPFC_IO_NVME_LS		0x400000 /* NVME LS command */
 #define LPFC_IO_NVMET		0x800000 /* NVMET command */
 #define LPFC_IO_VMID		0x1000000 /* VMID tagged IO */
+#define LPFC_IO_CMF		0x4000000 /* CMF command */

 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	struct lpfc_vport *vport;/* virtual port pointer */

@@ -462,4 +463,5 @@ struct lpfc_io_buf {
 	uint64_t ts_isr_cmpl;
 	uint64_t ts_data_io;
 #endif
+	uint64_t rx_cmd_start;
 };
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  * Copyright (C) 2009-2016 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *

@@ -557,6 +557,7 @@ struct lpfc_pc_sli4_params {
 	uint16_t mi_value;
 #define LPFC_DFLT_MIB_VAL	2
 	uint8_t mib_bde_cnt;
+	uint8_t cmf;
 	uint8_t cqv;
 	uint8_t mqv;
 	uint8_t wqv;

@@ -978,6 +979,8 @@ struct lpfc_sli4_hba {
 #define lpfc_conf_trunk_port3_nd_WORD	conf_trunk
 #define lpfc_conf_trunk_port3_nd_SHIFT	7
 #define lpfc_conf_trunk_port3_nd_MASK	0x1
+	uint8_t flash_id;
+	uint8_t asic_rev;
 };

 enum lpfc_sge_type {

@@ -20,7 +20,7 @@
  * included with this package. *
  *******************************************************************/

-#define LPFC_DRIVER_VERSION "12.8.0.10"
+#define LPFC_DRIVER_VERSION "14.0.0.1"
 #define LPFC_DRIVER_NAME "lpfc"

 /* Used for SLI 2/3 */
@@ -1451,10 +1451,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
 	 * pthru timeout to the os layer timeout value.
 	 */
 	if (scp->device->type == TYPE_TAPE) {
-		if ((scp->request->timeout / HZ) > 0xFFFF)
+		if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF)
 			pthru->timeout = cpu_to_le16(0xFFFF);
 		else
-			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+			pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ);
 	}

 	/*
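The megaraid_sas hunks here and below all make the same substitution: the driver stops dereferencing the removed scsi_cmnd::request pointer and goes through the scsi_cmd_to_rq() accessor instead. A minimal sketch of the accessor pattern (scsi_cmd_to_rq(), blk_mq_unique_tag(), blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() are the real block/SCSI helpers; the function itself is illustrative only):

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

static u16 example_route_cmd(struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	u32 unique_tag = blk_mq_unique_tag(rq);

	/* rq->timeout is in jiffies; the unique tag encodes hwq + tag */
	pr_debug("timeout=%u tag=%d\n", rq->timeout,
		 blk_mq_unique_tag_to_tag(unique_tag));
	return blk_mq_unique_tag_to_hwq(unique_tag);
}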
@@ -402,7 +402,7 @@ megasas_get_msix_index(struct megasas_instance *instance,
 			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
 				instance->msix_vectors));
 	} else if (instance->host->nr_hw_queues > 1) {
-		u32 tag = blk_mq_unique_tag(scmd->request);
+		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

 		cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
 			instance->low_latency_index_start;

@@ -3023,7 +3023,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
 		io_request->DevHandle = cpu_to_le16(device_id);
 		io_request->LUN[1] = scmd->device->lun;
 		pRAID_Context->timeout_value =
-			cpu_to_le16 (scmd->request->timeout / HZ);
+			cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ);
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
 			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

@@ -3086,7 +3086,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,

 	device_id = MEGASAS_DEV_INDEX(scmd);
 	pd_index = MEGASAS_PD_INDEX(scmd);
-	os_timeout_value = scmd->request->timeout / HZ;
+	os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ;
 	mr_device_priv_data = scmd->device->hostdata;
 	cmd->pd_interface = mr_device_priv_data->interface_type;

@@ -3381,7 +3381,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}

-	cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
+	cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag);

 	if (!cmd) {
 		atomic_dec(&instance->fw_outstanding);

@@ -3422,7 +3422,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
 	 */
 	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
 		r1_cmd = megasas_get_cmd_fusion(instance,
-				(scmd->request->tag + instance->max_fw_cmds));
+				scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds);
 		megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
 	}

@@ -183,6 +183,20 @@ enum mpi3mr_iocstate {
 	MRIOC_STATE_UNRECOVERABLE,
 };

+/* Init type definitions */
+enum mpi3mr_init_type {
+	MPI3MR_IT_INIT = 0,
+	MPI3MR_IT_RESET,
+	MPI3MR_IT_RESUME,
+};
+
+/* Cleanup reason definitions */
+enum mpi3mr_cleanup_reason {
+	MPI3MR_COMPLETE_CLEANUP = 0,
+	MPI3MR_REINIT_FAILURE,
+	MPI3MR_SUSPEND,
+};
+
 /* Reset reason code definitions*/
 enum mpi3mr_reset_reason {
 	MPI3MR_RESET_FROM_BRINGUP = 1,

@@ -855,8 +869,8 @@ struct delayed_dev_rmhs_node {

 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason);
 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
 	u16 admin_req_sz, u8 ignore_reset);

@@ -872,6 +886,7 @@ void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
 	u64 sense_buf_dma);

+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
 	struct mpi3_event_notification_reply *event_reply);
 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
@@ -3205,7 +3205,7 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
 /**
  * mpi3mr_init_ioc - Initialize the controller
  * @mrioc: Adapter instance reference
- * @re_init: Flag to indicate is this fresh init or re-init
+ * @init_type: Flag to indicate is the init_type
  *
  * This the controller initialization routine, executed either
  * after soft reset or from pci probe callback.

@@ -3218,7 +3218,7 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
  *
  * Return: 0 on success and non-zero on failure.
  */
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 {
 	int retval = 0;
 	enum mpi3mr_iocstate ioc_state;

@@ -3229,7 +3229,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)

 	mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
 	mrioc->change_count = 0;
-	if (!re_init) {
+	if (init_type == MPI3MR_IT_INIT) {
 		mrioc->cpu_count = num_online_cpus();
 		retval = mpi3mr_setup_resources(mrioc);
 		if (retval) {

@@ -3314,7 +3314,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 		goto out_failed;
 	}

-	if (!re_init) {
+	if (init_type != MPI3MR_IT_RESET) {
 		retval = mpi3mr_setup_isr(mrioc, 1);
 		if (retval) {
 			ioc_err(mrioc, "Failed to setup ISR error %d\n",

@@ -3332,7 +3332,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 	}

 	mpi3mr_process_factsdata(mrioc, &facts_data);
-	if (!re_init) {
+	if (init_type == MPI3MR_IT_INIT) {
 		retval = mpi3mr_check_reset_dma_mask(mrioc);
 		if (retval) {
 			ioc_err(mrioc, "Resetting dma mask failed %d\n",

@@ -3351,7 +3351,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 		goto out_failed;
 	}

-	if (!re_init) {
+	if (init_type == MPI3MR_IT_INIT) {
 		retval = mpi3mr_alloc_chain_bufs(mrioc);
 		if (retval) {
 			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",

@@ -3374,7 +3374,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 	writel(mrioc->sbq_host_index,
 	       &mrioc->sysif_regs->sense_buffer_free_host_index);

-	if (!re_init) {
+	if (init_type != MPI3MR_IT_RESET) {
 		retval = mpi3mr_setup_isr(mrioc, 0);
 		if (retval) {
 			ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",

@@ -3390,7 +3390,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 		goto out_failed;
 	}

-	if (re_init &&
+	if ((init_type != MPI3MR_IT_INIT) &&
 	    (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
 		retval = -1;
 		ioc_err(mrioc,

@@ -3422,7 +3422,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 		goto out_failed;
 	}

-	if (re_init) {
+	if (init_type != MPI3MR_IT_INIT) {
 		ioc_info(mrioc, "Issuing Port Enable\n");
 		retval = mpi3mr_issue_port_enable(mrioc, 0);
 		if (retval) {

@@ -3434,7 +3434,10 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
 	return retval;

 out_failed:
-	mpi3mr_cleanup_ioc(mrioc, re_init);
+	if (init_type == MPI3MR_IT_INIT)
+		mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+	else
+		mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
 out_nocleanup:
 	return retval;
 }
@@ -3495,7 +3498,7 @@ static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
  *
  * Return: Nothing.
  */
-static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
 {
 	u16 i;

@@ -3710,7 +3713,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
 /**
  * mpi3mr_cleanup_ioc - Cleanup controller
  * @mrioc: Adapter instance reference
- * @re_init: Cleanup due to a reinit or not
+ * @reason: Cleanup reason
  *
  * controller cleanup handler, Message unit reset or soft reset
  * and shutdown notification is issued to the controller and the

@@ -3718,11 +3721,11 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
  *
  * Return: Nothing.
  */
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
 {
 	enum mpi3mr_iocstate ioc_state;

-	if (!re_init)
+	if (reason == MPI3MR_COMPLETE_CLEANUP)
 		mpi3mr_stop_watchdog(mrioc);

 	mpi3mr_ioc_disable_intr(mrioc);

@@ -3737,11 +3740,11 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
 		    MPI3MR_RESET_FROM_MUR_FAILURE);

-		if (!re_init)
+		if (reason != MPI3MR_REINIT_FAILURE)
 			mpi3mr_issue_ioc_shutdown(mrioc);
 	}

-	if (!re_init) {
+	if (reason == MPI3MR_COMPLETE_CLEANUP) {
 		mpi3mr_free_mem(mrioc);
 		mpi3mr_cleanup_resources(mrioc);
 	}

@@ -3923,7 +3926,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
 	mpi3mr_flush_host_io(mrioc);
 	mpi3mr_invalidate_devhandles(mrioc);
 	mpi3mr_memset_buffers(mrioc);
-	retval = mpi3mr_init_ioc(mrioc, 1);
+	retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
 	if (retval) {
 		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
 		       mrioc->name, reset_reason);

@@ -50,7 +50,7 @@ static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
 	u32 unique_tag;
 	u16 host_tag, hw_queue;

-	unique_tag = blk_mq_unique_tag(scmd->request);
+	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

 	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
 	if (hw_queue >= mrioc->num_op_reply_q)
@@ -1963,7 +1963,6 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
 {
 	u16 eedp_flags = 0;
 	unsigned char prot_op = scsi_get_prot_op(scmd);
-	unsigned char prot_type = scsi_get_prot_type(scmd);

 	switch (prot_op) {
 	case SCSI_PROT_NORMAL:
@@ -1983,60 +1982,42 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
 		break;
 	case SCSI_PROT_READ_PASS:
-		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
-		    MPI3_EEDPFLAGS_CHK_REF_TAG | MPI3_EEDPFLAGS_CHK_APP_TAG |
-		    MPI3_EEDPFLAGS_CHK_GUARD;
+		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
 		break;
 	case SCSI_PROT_WRITE_PASS:
-		if (scsi_host_get_guard(scmd->device->host)
-		    & SHOST_DIX_GUARD_IP) {
-			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN |
-			    MPI3_EEDPFLAGS_CHK_APP_TAG |
-			    MPI3_EEDPFLAGS_CHK_GUARD |
-			    MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
+			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
 			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
 			    0xffff;
-		} else {
-			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
-			    MPI3_EEDPFLAGS_CHK_REF_TAG |
-			    MPI3_EEDPFLAGS_CHK_APP_TAG |
-			    MPI3_EEDPFLAGS_CHK_GUARD;
-		}
+		} else
+			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;

 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
 		break;
 	default:
 		return;
 	}

-	if (scsi_host_get_guard(scmd->device->host) & SHOST_DIX_GUARD_IP)
+	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
+
+	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
 		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;

-	switch (prot_type) {
-	case SCSI_PROT_DIF_TYPE0:
-		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
+		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
+			MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
 		scsiio_req->cdb.eedp32.primary_reference_tag =
-			cpu_to_be32(t10_pi_ref_tag(scmd->request));
-		break;
-	case SCSI_PROT_DIF_TYPE1:
-	case SCSI_PROT_DIF_TYPE2:
-		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
-		    MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE |
-		    MPI3_EEDPFLAGS_CHK_GUARD;
-		scsiio_req->cdb.eedp32.primary_reference_tag =
-			cpu_to_be32(t10_pi_ref_tag(scmd->request));
-		break;
-	case SCSI_PROT_DIF_TYPE3:
-		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD |
-		    MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
-		break;
-
-	default:
-		scsiio_req->msg_flags &= ~(MPI3_SCSIIO_MSGFLAGS_METASGL_VALID);
-		return;
+			cpu_to_be32(scsi_prot_ref_tag(scmd));
 	}

-	switch (scmd->device->sector_size) {
+	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
+		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+
+	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
+
+	switch (scsi_prot_interval(scmd)) {
 	case 512:
 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
 		break;
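The rewritten mpi3mr_setup_eedp() derives everything from scmd->prot_flags and the generic protection-information helpers instead of host guard settings and raw DIF types. A minimal sketch of how those midlayer helpers relate (scsi_prot_interval(), scsi_prot_ref_tag() and scsi_logical_block_count() are the real scsi_cmnd helpers; the function is illustrative only):

#include <scsi/scsi_cmnd.h>

static void example_log_pi(struct scsi_cmnd *scmd)
{
	/* Bytes of data per protection interval (the logical block size). */
	unsigned int interval = scsi_prot_interval(scmd);
	/* Number of logical blocks covered by the command. */
	unsigned int blocks = scsi_logical_block_count(scmd);

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
		pr_debug("expected ref tag %u\n", scsi_prot_ref_tag(scmd));
	pr_debug("%u blocks of %u bytes\n", blocks, interval);
}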
@@ -3451,7 +3432,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
 	u16 dev_handle;
 	u16 host_tag;
 	u32 scsiio_flags = 0;
-	struct request *rq = scmd->request;
+	struct request *rq = scsi_cmd_to_rq(scmd);
 	int iprio_class;

 	sdev_priv_data = scmd->device->hostdata;

@@ -3795,7 +3776,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}

 	mrioc->is_driver_loading = 1;
-	if (mpi3mr_init_ioc(mrioc, 0)) {
+	if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) {
 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
 		    __FILE__, __LINE__, __func__);
 		retval = -ENODEV;

@@ -3818,7 +3799,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return retval;

 addhost_failed:
-	mpi3mr_cleanup_ioc(mrioc, 0);
+	mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
 out_iocinit_failed:
 	destroy_workqueue(mrioc->fwevt_worker_thread);
 out_fwevtthread_failed:

@@ -3870,7 +3851,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
 		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
 		mpi3mr_tgtdev_put(tgtdev);
 	}
-	mpi3mr_cleanup_ioc(mrioc, 0);
+	mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);

 	spin_lock(&mrioc_list_lock);
 	list_del(&mrioc->list);

@@ -3910,7 +3891,7 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
 	if (wq)
 		destroy_workqueue(wq);
-	mpi3mr_cleanup_ioc(mrioc, 0);
+	mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
 }

 #ifdef CONFIG_PM

@@ -3940,7 +3921,7 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
 	mpi3mr_cleanup_fwevt_list(mrioc);
 	scsi_block_requests(shost);
 	mpi3mr_stop_watchdog(mrioc);
-	mpi3mr_cleanup_ioc(mrioc, 1);
+	mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND);

 	device_state = pci_choose_state(pdev, state);
 	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",

@@ -3988,7 +3969,8 @@ static int mpi3mr_resume(struct pci_dev *pdev)
 	}

 	mrioc->stop_drv_processing = 0;
-	mpi3mr_init_ioc(mrioc, 1);
+	mpi3mr_memset_buffers(mrioc);
+	mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME);
 	scsi_unblock_requests(shost);
 	mpi3mr_start_watchdog(mrioc);

@@ -116,6 +116,14 @@ MODULE_PARM_DESC(perf_mode,
 	"\t\tdefault - default perf_mode is 'balanced'"
 	);

+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t"
+	"This parameter is effective only if host_tagset_enable=1. &\n\t\t"
+	"when poll_queues are enabled then &\n\t\t"
+	"perf_mode is set to latency mode. &\n\t\t"
+	);
+
 enum mpt3sas_perf_mode {
 	MPT_PERF_MODE_DEFAULT = -1,
 	MPT_PERF_MODE_BALANCED = 0,

@@ -709,6 +717,7 @@ _base_fault_reset_work(struct work_struct *work)
 		 * and this call is safe since dead ioc will never return any
 		 * command back from HW.
 		 */
+		mpt3sas_base_pause_mq_polling(ioc);
 		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
 		/*
 		 * Set remove_host flag early since kernel thread will

@@ -744,6 +753,7 @@ _base_fault_reset_work(struct work_struct *work)
 		spin_unlock_irqrestore(
 		    &ioc->ioc_reset_in_progress_lock, flags);
 		mpt3sas_base_mask_interrupts(ioc);
+		mpt3sas_base_pause_mq_polling(ioc);
 		_base_clear_outstanding_commands(ioc);
 	}

@@ -1547,6 +1557,53 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 	return cb_idx;
 }

+/**
+ * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
+ *				when driver is flushing out the IOs.
+ * @ioc: per adapter object
+ *
+ * Pause polling on the mq poll (io uring) queues when driver is flushing
+ * out the IOs. Otherwise we may see the race condition of completing the same
+ * IO from two paths.
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+	int iopoll_q_count =
+	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
+	int qid;
+
+	for (qid = 0; qid < iopoll_q_count; qid++)
+		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
+
+	/*
+	 * wait for current poll to complete.
+	 */
+	for (qid = 0; qid < iopoll_q_count; qid++) {
+		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+			udelay(500);
+	}
+}
+
+/**
+ * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
+{
+	int iopoll_q_count =
+	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
+	int qid;
+
+	for (qid = 0; qid < iopoll_q_count; qid++)
+		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
+}
+
 /**
  * mpt3sas_base_mask_interrupts - disable interrupts
  * @ioc: per adapter object
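The pause/resume pair above is one half of a small handshake against the per-queue busy counter: the pauser publishes pause=1 and waits for any in-flight poll to drain, while pollers (mpt3sas_blk_mq_poll, added below) take busy only when not paused, so no completion is processed from two paths. Reduced to its core for a single queue, under the same struct layout (illustrative only):

static void example_pause_one(struct io_uring_poll_queue *q)
{
	atomic_set(&q->pause, 1);	/* stop new pollers */
	while (atomic_read(&q->busy))	/* wait out the current one */
		udelay(500);
}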
@@ -1722,7 +1779,8 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
 				    MPI2_RPHI_MSIX_INDEX_SHIFT),
 				    &ioc->chip->ReplyPostHostIndex);
 			}
-			if (!reply_q->irq_poll_scheduled) {
+			if (!reply_q->is_iouring_poll_q &&
+			    !reply_q->irq_poll_scheduled) {
 				reply_q->irq_poll_scheduled = true;
 				irq_poll_sched(&reply_q->irqpoll);
 			}

@@ -1778,6 +1836,33 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
 	return completed_cmds;
 }

+/**
+ * mpt3sas_blk_mq_poll - poll the blk mq poll queue
+ * @shost: Scsi_Host object
+ * @queue_num: hw ctx queue number
+ *
+ * Return number of entries that has been processed from poll queue.
+ */
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+	struct MPT3SAS_ADAPTER *ioc =
+	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
+	struct adapter_reply_queue *reply_q;
+	int num_entries = 0;
+	int qid = queue_num - ioc->iopoll_q_start_index;
+
+	if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
+	    !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
+		return 0;
+
+	reply_q = ioc->io_uring_poll_queues[qid].reply_q;
+
+	num_entries = _base_process_reply_queue(reply_q);
+	atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
+
+	return num_entries;
+}
+
 /**
  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
  * @irq: irq number (not used)
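mpt3sas_blk_mq_poll() is the driver's mq_poll hook: the block layer calls it for hardware contexts of type HCTX_TYPE_POLL when an application issues polled io_uring IO, and the feature is enabled at module load via poll_queues (together with host_tagset_enable=1). A sketch of where such a handler plugs in (.mq_poll and .map_queues are the real scsi_host_template hooks; the other callbacks named here are hypothetical):

static struct scsi_host_template example_sht = {
	.name		= "example",
	.queuecommand	= example_queuecommand,	/* hypothetical */
	.map_queues	= example_map_queues,	/* must set up HCTX_TYPE_POLL */
	.mq_poll	= mpt3sas_blk_mq_poll,	/* the handler added above */
};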
@@ -1851,6 +1936,8 @@ _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
 		return;

 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+		if (reply_q->is_iouring_poll_q)
+			continue;
 		irq_poll_init(&reply_q->irqpoll,
 			ioc->hba_queue_depth/4, _base_irqpoll);
 		reply_q->irq_poll_scheduled = false;

@@ -1900,6 +1987,12 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
 		/* TMs are on msix_index == 0 */
 		if (reply_q->msix_index == 0)
 			continue;
+
+		if (reply_q->is_iouring_poll_q) {
+			_base_process_reply_queue(reply_q);
+			continue;
+		}
+
 		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
 		if (reply_q->irq_poll_scheduled) {
 			/* Calling irq_poll_disable will wait for any pending

@@ -2998,6 +3091,11 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)

 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
 		list_del(&reply_q->list);
+		if (reply_q->is_iouring_poll_q) {
+			kfree(reply_q);
+			continue;
+		}
+
 		if (ioc->smp_affinity_enable)
 			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
 			    reply_q->msix_index), NULL);

@@ -3019,7 +3117,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
 {
 	struct pci_dev *pdev = ioc->pdev;
 	struct adapter_reply_queue *reply_q;
-	int r;
+	int r, qid;

 	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
 	if (!reply_q) {

@@ -3031,6 +3129,17 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
 	reply_q->msix_index = index;

 	atomic_set(&reply_q->busy, 0);
+
+	if (index >= ioc->iopoll_q_start_index) {
+		qid = index - ioc->iopoll_q_start_index;
+		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
+		    ioc->driver_name, ioc->id, qid);
+		reply_q->is_iouring_poll_q = 1;
+		ioc->io_uring_poll_queues[qid].reply_q = reply_q;
+		goto out;
+	}
+
 	if (ioc->msix_enable)
 		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
 		    ioc->driver_name, ioc->id, index);
@@ -3045,7 +3154,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
 		kfree(reply_q);
 		return -EBUSY;
 	}
-
+ out:
 	INIT_LIST_HEAD(&reply_q->list);
 	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
 	return 0;

@@ -3066,6 +3175,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 	unsigned int cpu, nr_cpus, nr_msix, index = 0;
 	struct adapter_reply_queue *reply_q;
 	int local_numa_node;
+	int iopoll_q_count = ioc->reply_queue_count -
+	    ioc->iopoll_q_start_index;

 	if (!_base_is_controller_msix_enabled(ioc))
 		return;

@@ -3099,7 +3210,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
 		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
 			const cpumask_t *mask;

-			if (reply_q->msix_index < ioc->high_iops_queues)
+			if (reply_q->msix_index < ioc->high_iops_queues ||
+			    reply_q->msix_index >= ioc->iopoll_q_start_index)
 				continue;

 			mask = pci_irq_get_affinity(ioc->pdev,

@@ -3121,13 +3233,14 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)

 fall_back:
 	cpu = cpumask_first(cpu_online_mask);
-	nr_msix -= ioc->high_iops_queues;
+	nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
 	index = 0;

 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
 		unsigned int i, group = nr_cpus / nr_msix;

-		if (reply_q->msix_index < ioc->high_iops_queues)
+		if (reply_q->msix_index < ioc->high_iops_queues ||
+		    reply_q->msix_index >= ioc->iopoll_q_start_index)
 			continue;

 		if (cpu >= nr_cpus)

@@ -3164,8 +3277,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
 {
 	u16 lnksta, speed;

+	/*
+	 * Disable high iops queues if io uring poll queues are enabled.
+	 */
 	if (perf_mode == MPT_PERF_MODE_IOPS ||
-	    perf_mode == MPT_PERF_MODE_LATENCY) {
+	    perf_mode == MPT_PERF_MODE_LATENCY ||
+	    ioc->io_uring_poll_queues) {
 		ioc->high_iops_queues = 0;
 		return;
 	}

@@ -3202,6 +3319,7 @@ mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
 		return;
 	pci_free_irq_vectors(ioc->pdev);
 	ioc->msix_enable = 0;
+	kfree(ioc->io_uring_poll_queues);
 }

 /**
@@ -3215,18 +3333,24 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
 	int i, irq_flags = PCI_IRQ_MSIX;
 	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
 	struct irq_affinity *descp = &desc;
+	/*
+	 * Don't allocate msix vectors for poll_queues.
+	 * msix_vectors is always within a range of FW supported reply queue.
+	 */
+	int nr_msix_vectors = ioc->iopoll_q_start_index;


 	if (ioc->smp_affinity_enable)
-		irq_flags |= PCI_IRQ_AFFINITY;
+		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
 	else
 		descp = NULL;

-	ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
-	    ioc->reply_queue_count);
+	ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues,
+	    ioc->reply_queue_count, nr_msix_vectors);

 	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
 	    ioc->high_iops_queues,
-	    ioc->reply_queue_count, irq_flags, descp);
+	    nr_msix_vectors, irq_flags, descp);

 	return i;
 }
@@ -3242,6 +3366,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	int r;
 	int i, local_max_msix_vectors;
 	u8 try_msix = 0;
+	int iopoll_q_count = 0;

 	ioc->msix_load_balance = false;

@@ -3257,22 +3382,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
 	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
 	    ioc->cpu_count, max_msix_vectors);
-	if (ioc->is_aero_ioc)
-		_base_check_and_enable_high_iops_queues(ioc,
-		    ioc->msix_vector_count);

 	ioc->reply_queue_count =
-	    min_t(int, ioc->cpu_count + ioc->high_iops_queues,
-	    ioc->msix_vector_count);
+	    min_t(int, ioc->cpu_count, ioc->msix_vector_count);

 	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
 		local_max_msix_vectors = (reset_devices) ? 1 : 8;
 	else
 		local_max_msix_vectors = max_msix_vectors;

-	if (local_max_msix_vectors > 0)
-		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
-		    ioc->reply_queue_count);
-	else if (local_max_msix_vectors == 0)
+	if (local_max_msix_vectors == 0)
 		goto try_ioapic;

 	/*
@@ -3293,14 +3412,77 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc->msix_load_balance)
 		ioc->smp_affinity_enable = 0;

+	if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
+		ioc->shost->host_tagset = 0;
+
+	/*
+	 * Enable io uring poll queues only if host_tagset is enabled.
+	 */
+	if (ioc->shost->host_tagset)
+		iopoll_q_count = poll_queues;
+
+	if (iopoll_q_count) {
+		ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
+		    sizeof(struct io_uring_poll_queue), GFP_KERNEL);
+		if (!ioc->io_uring_poll_queues)
+			iopoll_q_count = 0;
+	}
+
+	if (ioc->is_aero_ioc)
+		_base_check_and_enable_high_iops_queues(ioc,
+		    ioc->msix_vector_count);
+
+	/*
+	 * Add high iops queues count to reply queue count if high iops queues
+	 * are enabled.
+	 */
+	ioc->reply_queue_count = min_t(int,
+	    ioc->reply_queue_count + ioc->high_iops_queues,
+	    ioc->msix_vector_count);
+
+	/*
+	 * Adjust the reply queue count incase reply queue count
+	 * exceeds the user provided MSIx vectors count.
+	 */
+	if (local_max_msix_vectors > 0)
+		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
+		    ioc->reply_queue_count);
+	/*
+	 * Add io uring poll queues count to reply queues count
+	 * if io uring is enabled in driver.
+	 */
+	if (iopoll_q_count) {
+		if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
+			iopoll_q_count = 0;
+		ioc->reply_queue_count = min_t(int,
+		    ioc->reply_queue_count + iopoll_q_count,
+		    ioc->msix_vector_count);
+	}
+
+	/*
+	 * Starting index of io uring poll queues in reply queue list.
+	 */
+	ioc->iopoll_q_start_index =
+	    ioc->reply_queue_count - iopoll_q_count;
+
 	r = _base_alloc_irq_vectors(ioc);
 	if (r < 0) {
 		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
 		goto try_ioapic;
 	}

+	/*
+	 * Adjust the reply queue count if the allocated
+	 * MSIx vectors is less then the requested number
+	 * of MSIx vectors.
+	 */
+	if (r < ioc->iopoll_q_start_index) {
+		ioc->reply_queue_count = r + iopoll_q_count;
+		ioc->iopoll_q_start_index =
+		    ioc->reply_queue_count - iopoll_q_count;
+	}
+
 	ioc->msix_enable = 1;
-	ioc->reply_queue_count = r;
 	for (i = 0; i < ioc->reply_queue_count; i++) {
 		r = _base_request_irq(ioc, i);
 		if (r) {
@@ -3320,6 +3502,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	ioc->high_iops_queues = 0;
 	ioc_info(ioc, "High IOPs queues : disabled\n");
 	ioc->reply_queue_count = 1;
+	ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
 	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
 	if (r < 0) {
 		dfailprintk(ioc,
@@ -3416,6 +3599,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	u64 pio_chip = 0;
 	phys_addr_t chip_phys = 0;
 	struct adapter_reply_queue *reply_q;
+	int iopoll_q_count = 0;

 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

@@ -3489,6 +3673,12 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	if (r)
 		goto out_fail;

+	iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
+	for (i = 0; i < iopoll_q_count; i++) {
+		atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
+		atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
+	}
+
 	if (!ioc->is_driver_loading)
 		_base_init_irqpolls(ioc);
 	/* Use the Combined reply queue feature only for SAS3 C0 & higher
@@ -3530,11 +3720,18 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 		    * 4)));
 	}

-	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
+			pr_info("%s: enabled: index: %d\n",
+			    reply_q->name, reply_q->msix_index);
+			continue;
+		}
+
 		pr_info("%s: %s enabled: IRQ %d\n",
 		    reply_q->name,
 		    ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
 		    pci_irq_vector(ioc->pdev, reply_q->msix_index));
+	}

 	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
 	    &chip_phys, ioc->chip, memap_sz);
@@ -3651,7 +3848,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
 		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;

 	if (scmd && ioc->shost->nr_hw_queues > 1) {
-		u32 tag = blk_mq_unique_tag(scmd->request);
+		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

 		return blk_mq_unique_tag_to_hwq(tag) +
 		    ioc->high_iops_queues;

@@ -3735,7 +3932,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
 	u16 smid;
 	u32 tag, unique_tag;

-	unique_tag = blk_mq_unique_tag(scmd->request);
+	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
 	tag = blk_mq_unique_tag_to_tag(unique_tag);

 	/*
@@ -5168,6 +5365,73 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
 	    &ioc->diag_trigger_mpi, 1);
 }

+/**
+ * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices.
+ *				- On failure set default QD values.
+ * @ioc : per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
+{
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+	Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+	int sz;
+	int rc = 0;
+
+	ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+	ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
+	ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
+	ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
+	if (!ioc->is_gen35_ioc)
+		goto out;
+	/* sas iounit page 1 */
+	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
+	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+	if (!sas_iounit_pg1) {
+		pr_err("%s: failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return rc;
+	}
+	rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+	    sas_iounit_pg1, sz);
+	if (rc) {
+		pr_err("%s: failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+	ioc->max_wideport_qd =
+	    (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
+	    le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
+	    MPT3SAS_SAS_QUEUE_DEPTH;
+	ioc->max_narrowport_qd =
+	    (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
+	    le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
+	    MPT3SAS_SAS_QUEUE_DEPTH;
+	ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
+	    sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+	/* pcie iounit page 1 */
+	rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
+	    &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
+	if (rc) {
+		pr_err("%s: failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+	ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
+	    (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
+	    MPT3SAS_NVME_QUEUE_DEPTH;
+ out:
+	dinitprintk(ioc, pr_err(
+	    "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
+	    ioc->max_wideport_qd, ioc->max_narrowport_qd,
+	    ioc->max_sata_qd, ioc->max_nvme_qd));
+	kfree(sas_iounit_pg1);
+	return rc;
+}
+
 /**
  * _base_static_config_pages - static start of day config pages
  * @ioc: per adapter object
@@ -5237,6 +5501,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
 		ioc_warn(ioc,
 		    "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
 	}
+	rc = _base_assign_fw_reported_qd(ioc);
+	if (rc)
+		return rc;
 	rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
 	if (rc)
 		return rc;
@@ -8471,6 +8738,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	_base_pre_reset_handler(ioc);
 	mpt3sas_wait_for_commands_to_complete(ioc);
 	mpt3sas_base_mask_interrupts(ioc);
+	mpt3sas_base_pause_mq_polling(ioc);
 	r = mpt3sas_base_make_ioc_ready(ioc, type);
 	if (r)
 		goto out;

@@ -8512,6 +8780,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 	ioc->ioc_reset_count++;
 	mutex_unlock(&ioc->reset_in_progress_mutex);
+	mpt3sas_base_resume_mq_polling(ioc);

 out_unlocked:
 	if ((r == 0) && is_trigger) {
@@ -77,9 +77,9 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"37.101.00.00"
-#define MPT3SAS_MAJOR_VERSION		37
-#define MPT3SAS_MINOR_VERSION		101
+#define MPT3SAS_DRIVER_VERSION		"39.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		39
+#define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00

@@ -354,6 +354,7 @@ struct mpt3sas_nvme_cmd {
 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3	12
 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35	16
 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET	(0x10)
+#define MPT3_MIN_IRQS			1

 /* OEM Identifiers */
 #define MFG10_OEM_ID_INVALID                   (0x00000000)

@@ -575,6 +576,7 @@ struct _sas_device {
 	u8	is_chassis_slot_valid;
 	u8	connector_name[5];
 	struct kref refcount;
+	u8	port_type;
 	struct hba_port *port;
 	struct sas_rphy *rphy;
 };

@@ -936,6 +938,8 @@ struct _event_ack_list {
  * @os_irq: irq number
  * @irqpoll: irq_poll object
  * @irq_poll_scheduled: Tells whether irq poll is scheduled or not
+ * @is_iouring_poll_q: Tells whether reply queues is assigned
+ *			to io uring poll queues or not
  * @list: this list
  */
 struct adapter_reply_queue {
@@ -949,9 +953,22 @@ struct adapter_reply_queue {
 	struct irq_poll         irqpoll;
 	bool irq_poll_scheduled;
 	bool irq_line_enable;
+	bool is_iouring_poll_q;
 	struct list_head list;
 };

+/**
+ * struct io_uring_poll_queue - the io uring poll queue structure
+ * @busy: Tells whether io uring poll queue is busy or not
+ * @pause: Tells whether IOs are paused on io uring poll queue or not
+ * @reply_q: reply queue mapped for io uring poll queue
+ */
+struct io_uring_poll_queue {
+	atomic_t busy;
+	atomic_t pause;
+	struct adapter_reply_queue *reply_q;
+};
+
 typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);

 /* SAS3.0 support */
@@ -1176,6 +1193,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
  * @thresh_hold: Max number of reply descriptors processed
  *				before updating Host Index
+ * @iopoll_q_start_index: starting index of io uring poll queues
+ *				in reply queue list
  * @drv_internal_flags: Bit map internal to driver
  * @drv_support_bitmap: driver's supported feature bit map
  * @use_32bit_dma: Flag to use 32 bit consistent dma mask

@@ -1372,11 +1391,13 @@ struct MPT3SAS_ADAPTER {
 	bool            msix_load_balance;
 	u16             thresh_hold;
 	u8              high_iops_queues;
+	u8              iopoll_q_start_index;
 	u32             drv_internal_flags;
 	u32             drv_support_bitmap;
 	u32             dma_mask;
 	bool            enable_sdev_max_qd;
 	bool            use_32bit_dma;
+	struct io_uring_poll_queue *io_uring_poll_queues;

 	/* internal commands, callback index */
 	u8              scsi_io_cb_idx;

@@ -1423,6 +1444,10 @@ struct MPT3SAS_ADAPTER {
 	u8              tm_custom_handling;
 	u8              nvme_abort_timeout;
 	u16             max_shutdown_latency;
+	u16             max_wideport_qd;
+	u16             max_narrowport_qd;
+	u16             max_nvme_qd;
+	u8              max_sata_qd;

 	/* static config pages */
 	struct mpt3sas_facts facts;
@@ -1730,10 +1755,12 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \
 	status, mpi_request, sz); } while (0)

 int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
-int
-mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
 void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc);

 /* scsih shared API */
 struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,

@@ -1829,6 +1856,9 @@ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
 int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
 	Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
 	u32 form, u32 handle);
+int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+	u16 sz);
 int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
 	Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
 	u16 sz);
@@ -1168,6 +1168,43 @@ mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
 	return r;
 }

+/**
+ * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page,
+	u16 sz)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+	mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT;
+	mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION;
+	mpi_request.Header.PageNumber = 1;
+	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+	return r;
+}
+
 /**
  * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
  * @ioc: per adapter object
@@ -3820,9 +3820,10 @@ enable_sdev_max_qd_store(struct device *cdev,
 		}
 	} else if (sas_target_priv_data->flags &
 	    MPT_TARGET_FLAGS_PCIE_DEVICE)
-		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+		qdepth = ioc->max_nvme_qd;
 	else
-		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+		qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
+		    ioc->max_wideport_qd : ioc->max_narrowport_qd;
 
 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
 }
@@ -3918,6 +3919,24 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(sas_device_handle);
 
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
 /**
  * sas_ncq_prio_enable_show - send prioritized io commands to device
  * @dev: pointer to embedded device
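The new sas_ncq_prio_supported attribute sits next to the existing sas_ncq_prio_enable one, so userspace can probe for SATA NCQ priority support before trying to enable it; it is registered in mpt3sas_dev_attrs[] in the next hunk. A hypothetical userspace check (the sysfs path is illustrative and varies by device; plain C, not part of the patch):

#include <stdio.h>

int main(void)
{
	char buf[8];
	/* assumption: sda sits behind an mpt3sas host */
	FILE *f = fopen("/sys/block/sda/device/sas_ncq_prio_supported", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("sas_ncq_prio_supported: %s", buf);
	fclose(f);
	return 0;
}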
@@ -3960,6 +3979,7 @@ static DEVICE_ATTR_RW(sas_ncq_prio_enable);
 struct device_attribute *mpt3sas_dev_attrs[] = {
 	&dev_attr_sas_address,
 	&dev_attr_sas_device_handle,
+	&dev_attr_sas_ncq_prio_supported,
 	&dev_attr_sas_ncq_prio_enable,
 	NULL,
 };

drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1803,7 +1803,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
 	 * is disabled.
 	 */
-	if (ioc->enable_sdev_max_qd)
+	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
 		goto not_sata;
 
 	sas_device_priv_data = sdev->hostdata;
@@ -2657,7 +2657,7 @@ scsih_slave_configure(struct scsi_device *sdev)
 			return 1;
 		}
 
-		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+		qdepth = ioc->max_nvme_qd;
 		ds = "NVMe";
 		sdev_printk(KERN_INFO, sdev,
 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
@@ -2709,7 +2709,8 @@ scsih_slave_configure(struct scsi_device *sdev)
 		sas_device->volume_handle = volume_handle;
 		sas_device->volume_wwid = volume_wwid;
 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
-		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+		qdepth = (sas_device->port_type > 1) ?
+		    ioc->max_wideport_qd : ioc->max_narrowport_qd;
 		ssp_target = 1;
 		if (sas_device->device_info &
 		    MPI2_SAS_DEVICE_INFO_SEP) {
@@ -2721,7 +2722,7 @@ scsih_slave_configure(struct scsi_device *sdev)
 		} else
 			ds = "SSP";
 	} else {
-		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+		qdepth = ioc->max_sata_qd;
 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
 			ds = "STP";
 		else if (sas_device->device_info &
@@ -3304,7 +3305,7 @@ scsih_abort(struct scsi_cmnd *scmd)
 	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
 	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
 	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
-	    (scmd->request->timeout / HZ) * 1000);
+	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
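The scmd->request conversions in this hunk and throughout the series are mechanical: the request is no longer stored as a separate field of struct scsi_cmnd, so drivers reach it through the scsi_cmd_to_rq() accessor instead. A self-contained toy model of the idea (plain C; the struct layouts and the cmd_to_rq() helper are invented for illustration, not the kernel's implementation):

#include <stdio.h>
#include <stdlib.h>

/* Toy model: the request and the SCSI command are carved out of one
 * allocation, with the command payload placed right behind the request,
 * so the back-pointer is pure pointer arithmetic. */
struct request { unsigned int timeout; int tag; };
struct scsi_cmnd { int retries; };

static struct request *cmd_to_rq(struct scsi_cmnd *scmd)
{
	return (struct request *)scmd - 1;
}

int main(void)
{
	struct request *rq = calloc(1, sizeof(struct request) +
				       sizeof(struct scsi_cmnd));
	struct scsi_cmnd *scmd = (struct scsi_cmnd *)(rq + 1);

	rq->timeout = 30;
	rq->tag = 7;
	printf("timeout=%u tag=%d via accessor\n",
	       cmd_to_rq(scmd)->timeout, cmd_to_rq(scmd)->tag);
	free(rq);
	return 0;
}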
@@ -5047,48 +5048,31 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 	Mpi25SCSIIORequest_t *mpi_request)
 {
 	u16 eedp_flags;
-	unsigned char prot_op = scsi_get_prot_op(scmd);
-	unsigned char prot_type = scsi_get_prot_type(scmd);
 	Mpi25SCSIIORequest_t *mpi_request_3v =
 	    (Mpi25SCSIIORequest_t *)mpi_request;
 
-	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
-		return;
-
-	if (prot_op == SCSI_PROT_READ_STRIP)
+	switch (scsi_get_prot_op(scmd)) {
+	case SCSI_PROT_READ_STRIP:
 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
-	else if (prot_op == SCSI_PROT_WRITE_INSERT)
+		break;
+	case SCSI_PROT_WRITE_INSERT:
 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
-	else
+		break;
+	default:
 		return;
-
-	switch (prot_type) {
-	case SCSI_PROT_DIF_TYPE1:
-	case SCSI_PROT_DIF_TYPE2:
-
-		/*
-		 * enable ref/guard checking
-		 * auto increment ref tag
-		 */
-		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
-		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
-		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
-		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
-		break;
-
-	case SCSI_PROT_DIF_TYPE3:
-
-		/*
-		 * enable guard checking
-		 */
-		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
-
-		break;
 	}
 
-	mpi_request_3v->EEDPBlockSize =
-	    cpu_to_le16(scmd->device->sector_size);
+	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+			cpu_to_be32(scsi_prot_ref_tag(scmd));
+	}
+
+	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
 
 	if (ioc->is_gen35_ioc)
 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
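The EEDP rework above stops re-deriving the checking policy from the DIF protection type and instead consumes the prot_flags the SCSI midlayer now pre-computes per command. A standalone sketch of what that mapping means in practice (the flag values here are made up for the example; the kernel's SCSI_PROT_* constants and plumbing differ):

#include <stdio.h>

#define SCSI_PROT_REF_CHECK   (1 << 0)
#define SCSI_PROT_GUARD_CHECK (1 << 1)

/* Illustrative only: Type 1/2 check ref tag and guard, Type 3 checks
 * only the guard tag, Type 0 checks nothing. */
static unsigned int dif_type_to_prot_flags(int dif_type)
{
	switch (dif_type) {
	case 1:
	case 2:
		return SCSI_PROT_REF_CHECK | SCSI_PROT_GUARD_CHECK;
	case 3:
		return SCSI_PROT_GUARD_CHECK;
	default:
		return 0;
	}
}

int main(void)
{
	for (int t = 0; t <= 3; t++)
		printf("DIF type %d -> prot_flags 0x%x\n", t,
		       dif_type_to_prot_flags(t));
	return 0;
}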
@@ -5141,7 +5125,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	struct MPT3SAS_TARGET *sas_target_priv_data;
 	struct _raid_device *raid_device;
-	struct request *rq = scmd->request;
+	struct request *rq = scsi_cmd_to_rq(scmd);
 	int class;
 	Mpi25SCSIIORequest_t *mpi_request;
 	struct _pcie_device *pcie_device = NULL;
@@ -7371,6 +7355,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 
 	/* get device name */
 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+	sas_device->port_type = sas_device_pg0.MaxPortConnections;
+	ioc_info(ioc,
+	    "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
+	    handle, sas_device->sas_address, sas_device->port_type);
 
 	if (ioc->wait_for_discovery_to_complete)
 		_scsih_sas_device_init_add(ioc, sas_device);
@@ -9603,6 +9591,42 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
 	}
 }
 
+/**
+ * _scsih_update_device_qdepth - Update QD during Reset.
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct MPT3SAS_TARGET *sas_target_priv_data;
+	struct _sas_device *sas_device;
+	struct scsi_device *sdev;
+	u16 qdepth;
+
+	ioc_info(ioc, "Update devices with firmware reported queue depth\n");
+	shost_for_each_device(sdev, ioc->shost) {
+		sas_device_priv_data = sdev->hostdata;
+		if (sas_device_priv_data && sas_device_priv_data->sas_target) {
+			sas_target_priv_data = sas_device_priv_data->sas_target;
+			sas_device = sas_device_priv_data->sas_target->sas_dev;
+			if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
+				qdepth = ioc->max_nvme_qd;
+			else if (sas_device &&
+			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+				qdepth = (sas_device->port_type > 1) ?
+				    ioc->max_wideport_qd : ioc->max_narrowport_qd;
+			else if (sas_device &&
+			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+				qdepth = ioc->max_sata_qd;
+			else
+				continue;
+			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+		}
+	}
+}
+
 /**
  * _scsih_mark_responding_sas_device - mark a sas_devices as responding
  * @ioc: per adapter object
@@ -10654,6 +10678,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 		_scsih_remove_unresponding_devices(ioc);
 		_scsih_del_dirty_vphy(ioc);
 		_scsih_del_dirty_port_entries(ioc);
+		if (ioc->is_gen35_ioc)
+			_scsih_update_device_qdepth(ioc);
 		_scsih_scan_for_devices_after_reset(ioc);
 		/*
 		 * If diag reset has occurred during the driver load
@@ -11178,8 +11204,10 @@ static void scsih_remove(struct pci_dev *pdev)
 
 	ioc->remove_host = 1;
 
-	if (!pci_device_is_present(pdev))
+	if (!pci_device_is_present(pdev)) {
+		mpt3sas_base_pause_mq_polling(ioc);
 		_scsih_flush_running_cmds(ioc);
+	}
 
 	_scsih_fw_event_cleanup_queue(ioc);
 
@@ -11274,8 +11302,10 @@ scsih_shutdown(struct pci_dev *pdev)
 
 	ioc->remove_host = 1;
 
-	if (!pci_device_is_present(pdev))
+	if (!pci_device_is_present(pdev)) {
+		mpt3sas_base_pause_mq_polling(ioc);
 		_scsih_flush_running_cmds(ioc);
+	}
 
 	_scsih_fw_event_cleanup_queue(ioc);
 
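mpt3sas_base_pause_mq_polling(), called in the remove/shutdown/error paths above, quiesces the io_uring poll queues before in-flight commands are flushed. A minimal sketch of the assumed pause/drain semantics (userspace C11 atomics in place of the driver's primitives; names and the exact drain strategy are assumptions for illustration):

#include <stdatomic.h>
#include <stdbool.h>

struct poll_queue {
	atomic_bool pause;
	atomic_int  busy;   /* incremented by each active poll loop */
};

static void pause_mq_polling(struct poll_queue *q)
{
	atomic_store(&q->pause, true);
	while (atomic_load(&q->busy) > 0)
		;               /* wait for in-flight polls to drain */
}

static void resume_mq_polling(struct poll_queue *q)
{
	atomic_store(&q->pause, false);
}

static int blk_mq_poll_once(struct poll_queue *q)
{
	if (atomic_load(&q->pause))
		return 0;       /* polling is paused */
	atomic_fetch_add(&q->busy, 1);
	/* ... process completed reply descriptors here ... */
	atomic_fetch_sub(&q->busy, 1);
	return 1;
}

int main(void)
{
	struct poll_queue q = { .pause = false, .busy = 0 };

	blk_mq_poll_once(&q);   /* polls normally */
	pause_mq_polling(&q);   /* no pollers inside, returns at once */
	blk_mq_poll_once(&q);   /* now a no-op */
	resume_mq_polling(&q);
	return 0;
}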
@@ -11785,12 +11815,41 @@ static int scsih_map_queues(struct Scsi_Host *shost)
 {
 	struct MPT3SAS_ADAPTER *ioc =
 	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
+	struct blk_mq_queue_map *map;
+	int i, qoff, offset;
+	int nr_msix_vectors = ioc->iopoll_q_start_index;
+	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
 
-	if (ioc->shost->nr_hw_queues == 1)
+	if (shost->nr_hw_queues == 1)
 		return 0;
 
-	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-	    ioc->pdev, ioc->high_iops_queues);
+	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+		map = &shost->tag_set.map[i];
+		map->nr_queues = 0;
+		offset = 0;
+		if (i == HCTX_TYPE_DEFAULT) {
+			map->nr_queues =
+			    nr_msix_vectors - ioc->high_iops_queues;
+			offset = ioc->high_iops_queues;
+		} else if (i == HCTX_TYPE_POLL)
+			map->nr_queues = iopoll_q_count;
+
+		if (!map->nr_queues)
+			BUG_ON(i == HCTX_TYPE_DEFAULT);
+
+		/*
+		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
+		 * affinity), so use the regular blk-mq cpu mapping
+		 */
+		map->queue_offset = qoff;
+		if (i != HCTX_TYPE_POLL)
+			blk_mq_pci_map_queues(map, ioc->pdev, offset);
+		else
+			blk_mq_map_queues(map);
+
+		qoff += map->nr_queues;
+	}
+	return 0;
 }
 
 /* shost template for SAS 2.0 HBA devices */
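scsih_map_queues() now splits the reply queues across blk-mq map types: the interrupt-driven queues (minus the high-IOPS ones) land in HCTX_TYPE_DEFAULT, and the IRQ-less io_uring poll queues land in HCTX_TYPE_POLL. A runnable worked example of the offset/count arithmetic, with made-up queue counts (this models the loop above, it is not driver code):

#include <stdio.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

int main(void)
{
	int reply_queue_count = 16;    /* total reply queues (made up) */
	int iopoll_q_start_index = 12; /* first io_uring poll queue */
	int high_iops_queues = 0;

	int nr_msix_vectors = iopoll_q_start_index;
	int iopoll_q_count = reply_queue_count - nr_msix_vectors;
	int qoff = 0;

	for (int i = 0; i < HCTX_MAX_TYPES; i++) {
		int nr_queues = 0;
		if (i == HCTX_TYPE_DEFAULT)
			nr_queues = nr_msix_vectors - high_iops_queues;
		else if (i == HCTX_TYPE_POLL)
			nr_queues = iopoll_q_count;
		printf("map %d: queue_offset=%d nr_queues=%d\n",
		       i, qoff, nr_queues);
		qoff += nr_queues;
	}
	return 0;
}

With these numbers the default map covers hardware queues 0-11 and the poll map covers 12-15, matching nr_maps = 3 set later in _scsih_probe().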
@@ -11861,6 +11920,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
 	.track_queue_depth		= 1,
 	.cmd_size			= sizeof(struct scsiio_tracker),
 	.map_queues			= scsih_map_queues,
+	.mq_poll			= mpt3sas_blk_mq_poll,
 };
 
 /* raid transport support for SAS 3.0 HBA devices */
@@ -11957,6 +12017,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct Scsi_Host *shost = NULL;
 	int rv;
 	u16 hba_mpi_version;
+	int iopoll_q_count = 0;
 
 	/* Determine in which MPI version class this pci device belongs */
 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
@@ -12204,6 +12265,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_thread_fail;
 	}
 
+	shost->host_tagset = 0;
+
+	if (ioc->is_gen35_ioc && host_tagset_enable)
+		shost->host_tagset = 1;
+
 	ioc->is_driver_loading = 1;
 	if ((mpt3sas_base_attach(ioc))) {
 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -12226,16 +12292,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	} else
 		ioc->hide_drives = 0;
 
-	shost->host_tagset = 0;
 	shost->nr_hw_queues = 1;
 
-	if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
-	    host_tagset_enable && ioc->smp_affinity_enable) {
-
-		shost->host_tagset = 1;
+	if (shost->host_tagset) {
 		shost->nr_hw_queues =
 		    ioc->reply_queue_count - ioc->high_iops_queues;
 
+		iopoll_q_count =
+		    ioc->reply_queue_count - ioc->iopoll_q_start_index;
+
+		shost->nr_maps = iopoll_q_count ? 3 : 1;
+
 		dev_info(&ioc->pdev->dev,
 		    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
 		    shost->can_queue, shost->nr_hw_queues);
@@ -12359,6 +12426,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 		/* Permanent error, prepare for device removal */
 		ioc->pci_error_recovery = 1;
 		mpt3sas_base_stop_watchdog(ioc);
+		mpt3sas_base_pause_mq_polling(ioc);
 		_scsih_flush_running_cmds(ioc);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}

drivers/scsi/mvumi.c
@@ -702,7 +702,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
 	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 
 	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
-			scmd->request->tag, scmd->cmnd[0], scmd->retries);
+			scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);
 
 	return mhba->instancet->reset_host(mhba);
 }

drivers/scsi/myrb.c
@@ -1263,6 +1263,7 @@ static int myrb_host_reset(struct scsi_cmnd *scmd)
 static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
 		struct scsi_cmnd *scmd)
 {
+	struct request *rq = scsi_cmd_to_rq(scmd);
 	struct myrb_hba *cb = shost_priv(shost);
 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1286,7 +1287,7 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
 	}
 
 	mbox->type3.opcode = MYRB_CMD_DCDB;
-	mbox->type3.id = scmd->request->tag + 3;
+	mbox->type3.id = rq->tag + 3;
 	mbox->type3.addr = dcdb_addr;
 	dcdb->channel = sdev->channel;
 	dcdb->target = sdev->id;
@@ -1305,11 +1306,11 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
 		break;
 	}
 	dcdb->early_status = false;
-	if (scmd->request->timeout <= 10)
+	if (rq->timeout <= 10)
 		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
-	else if (scmd->request->timeout <= 60)
+	else if (rq->timeout <= 60)
 		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
-	else if (scmd->request->timeout <= 600)
+	else if (rq->timeout <= 600)
 		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
 	else
 		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
@@ -1550,7 +1551,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
 	}
 
 	myrb_reset_cmd(cmd_blk);
-	mbox->type5.id = scmd->request->tag + 3;
+	mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
 	if (scmd->sc_data_direction == DMA_NONE)
 		goto submit;
 	nsge = scsi_dma_map(scmd);

drivers/scsi/myrs.c
@@ -1582,6 +1582,7 @@ static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
 static int myrs_queuecommand(struct Scsi_Host *shost,
 		struct scsi_cmnd *scmd)
 {
+	struct request *rq = scsi_cmd_to_rq(scmd);
 	struct myrs_hba *cs = shost_priv(shost);
 	struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
 	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
@@ -1628,7 +1629,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
 		return SCSI_MLQUEUE_HOST_BUSY;
 	cmd_blk->sense_addr = sense_addr;
 
-	timeout = scmd->request->timeout;
+	timeout = rq->timeout;
 	if (scmd->cmd_len <= 10) {
 		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
 			struct myrs_ldev_info *ldev_info = sdev->hostdata;
@@ -1644,10 +1645,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
 			mbox->SCSI_10.pdev.target = sdev->id;
 			mbox->SCSI_10.pdev.channel = sdev->channel;
 		}
-		mbox->SCSI_10.id = scmd->request->tag + 3;
+		mbox->SCSI_10.id = rq->tag + 3;
 		mbox->SCSI_10.control.dma_ctrl_to_host =
 			(scmd->sc_data_direction == DMA_FROM_DEVICE);
-		if (scmd->request->cmd_flags & REQ_FUA)
+		if (rq->cmd_flags & REQ_FUA)
 			mbox->SCSI_10.control.fua = true;
 		mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
 		mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
@@ -1690,10 +1691,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
 			mbox->SCSI_255.pdev.target = sdev->id;
 			mbox->SCSI_255.pdev.channel = sdev->channel;
 		}
-		mbox->SCSI_255.id = scmd->request->tag + 3;
+		mbox->SCSI_255.id = rq->tag + 3;
 		mbox->SCSI_255.control.dma_ctrl_to_host =
 			(scmd->sc_data_direction == DMA_FROM_DEVICE);
-		if (scmd->request->cmd_flags & REQ_FUA)
+		if (rq->cmd_flags & REQ_FUA)
 			mbox->SCSI_255.control.fua = true;
 		mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
 		mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;

drivers/scsi/ncr53c8xx.c
@@ -1453,11 +1453,6 @@ struct head {
 #define  xerr_status   phys.xerr_st
 #define  nego_status   phys.nego_st
 
-#if 0
-#define  sync_status   phys.sync_st
-#define  wide_status   phys.wide_st
-#endif
-
 /*==========================================================
 **
 **	Declaration of structs:	Data structure block
@@ -1980,9 +1975,6 @@ static inline char *ncr_name (struct ncb *np)
 #define	RELOC_SOFTC	0x40000000
 #define	RELOC_LABEL	0x50000000
 #define	RELOC_REGISTER	0x60000000
-#if 0
-#define	RELOC_KVAR	0x70000000
-#endif
 #define	RELOC_LABELH	0x80000000
 #define	RELOC_MASK	0xf0000000
 
@@ -1991,21 +1983,7 @@ static inline char *ncr_name (struct ncb *np)
 #define	PADDRH(label)	(RELOC_LABELH | offsetof(struct scripth, label))
 #define	RADDR(label)	(RELOC_REGISTER | REG(label))
 #define	FADDR(label,ofs)(RELOC_REGISTER | ((REG(label))+(ofs)))
-#if 0
-#define	KVAR(which)	(RELOC_KVAR | (which))
-#endif
 
-#if 0
-#define	SCRIPT_KVAR_JIFFIES	(0)
-#define	SCRIPT_KVAR_FIRST	SCRIPT_KVAR_JIFFIES
-#define	SCRIPT_KVAR_LAST	SCRIPT_KVAR_JIFFIES
-/*
- * Kernel variables referenced in the scripts.
- * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY.
- */
-static void *script_kvars[] __initdata =
-	{ (void *)&jiffies };
-#endif
 
 static struct script script0 __initdata = {
 /*--------------------------< START >-----------------------*/ {
@@ -2162,11 +2140,6 @@ static struct script script0 __initdata = {
 	SCR_COPY (1),
 		RADDR (scratcha),
 		NADDR (msgout),
-#if 0
-	SCR_COPY (1),
-		RADDR (scratcha),
-		NADDR (msgin),
-#endif
 	/*
 	**	Anticipate the COMMAND phase.
 	**	This is the normal case for initial selection.
@@ -4164,8 +4137,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
 	**
 	**----------------------------------------------------
 	*/
-	if (np->settle_time && cmd->request->timeout >= HZ) {
-		u_long tlimit = jiffies + cmd->request->timeout - HZ;
+	if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) {
+		u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ;
 		if (time_after(np->settle_time, tlimit))
 			np->settle_time = tlimit;
 	}
@@ -4378,10 +4351,6 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
 	cp->parity_status = 0;
 
 	cp->xerr_status = XE_OK;
-#if 0
-	cp->sync_status = tp->sval;
-	cp->wide_status = tp->wval;
-#endif
 
 	/*----------------------------------------------------
 	**
@@ -4553,12 +4522,8 @@ static void ncr_start_reset(struct ncb *np)
 **
 **==========================================================
 */
-static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
+static int ncr_reset_bus (struct ncb *np)
 {
-/*	struct scsi_device *device = cmd->device; */
 	struct ccb *cp;
-	int found;
 
 /*
  * Return immediately if reset is in progress.
 */
@@ -4572,24 +4537,6 @@ static int ncr_reset_bus (struct ncb *np)
	 * delay of 2 seconds will be completed.
	 */
	ncr_start_reset(np);
-	/*
-	 * First, look in the wakeup list
-	 */
-	for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
-		/*
-		** look for the ccb of this command.
-		*/
-		if (cp->host_status == HS_IDLE) continue;
-		if (cp->cmd == cmd) {
-			found = 1;
-			break;
-		}
-	}
-	/*
-	 * Then, look in the waiting list
-	 */
-	if (!found && retrieve_from_waiting_list(0, np, cmd))
-		found = 1;
	/*
	 * Wake-up all awaiting commands with DID_RESET.
	 */
@@ -4598,103 +4545,10 @@ static int ncr_reset_bus (struct ncb *np)
	 * Wake-up all pending commands with HS_RESET -> DID_RESET.
	 */
	ncr_wakeup(np, HS_RESET);
-	/*
-	 * If the involved command was not in a driver queue, and the
-	 * scsi driver told us reset is synchronous, and the command is not
-	 * currently in the waiting list, complete it with DID_RESET status,
-	 * in order to keep it alive.
-	 */
-	if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
-		set_host_byte(cmd, DID_RESET);
-		ncr_queue_done_cmd(np, cmd);
-	}
 
	return SUCCESS;
 }
 
-#if 0 /* unused and broken.. */
-/*==========================================================
-**
-**
-**	Abort an SCSI command.
-**	This is called from the generic SCSI driver.
-**
-**
-**==========================================================
-*/
-static int ncr_abort_command (struct ncb *np, struct scsi_cmnd *cmd)
-{
-/*	struct scsi_device *device = cmd->device; */
-	struct ccb *cp;
-	int found;
-	int retv;
-
-	/*
-	 * First, look for the scsi command in the waiting list
-	 */
-	if (remove_from_waiting_list(np, cmd)) {
-		set_host_byte(cmd, DID_ABORT);
-		ncr_queue_done_cmd(np, cmd);
-		return SCSI_ABORT_SUCCESS;
-	}
-
-	/*
-	 * Then, look in the wakeup list
-	 */
-	for (found=0, cp=np->ccb; cp; cp=cp->link_ccb) {
-		/*
-		** look for the ccb of this command.
-		*/
-		if (cp->host_status == HS_IDLE) continue;
-		if (cp->cmd == cmd) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found) {
-		return SCSI_ABORT_NOT_RUNNING;
-	}
-
-	if (np->settle_time) {
-		return SCSI_ABORT_SNOOZE;
-	}
-
-	/*
-	** If the CCB is active, patch schedule jumps for the
-	** script to abort the command.
-	*/
-
-	switch(cp->host_status) {
-	case HS_BUSY:
-	case HS_NEGOTIATE:
-		printk ("%s: abort ccb=%p (cancel)\n", ncr_name (np), cp);
-		cp->start.schedule.l_paddr =
-			cpu_to_scr(NCB_SCRIPTH_PHYS (np, cancel));
-		retv = SCSI_ABORT_PENDING;
-		break;
-	case HS_DISCONNECT:
-		cp->restart.schedule.l_paddr =
-			cpu_to_scr(NCB_SCRIPTH_PHYS (np, abort));
-		retv = SCSI_ABORT_PENDING;
-		break;
-	default:
-		retv = SCSI_ABORT_NOT_RUNNING;
-		break;
-
-	}
-
-	/*
-	** If there are no requests, the script
-	** processor will sleep on SEL_WAIT_RESEL.
-	** Let's wake it up, since it may have to work.
-	*/
-	OUTB (nc_istat, SIGP);
-
-	return retv;
-}
-#endif
-
 static void ncr_detach(struct ncb *np)
 {
	struct ccb *cp;
@@ -5453,27 +5307,6 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
	 */
	fak = (kpc - 1) / div_10M[div] + 1;
 
-#if 0	/* This optimization does not seem very useful */
-
-	per = (fak * div_10M[div]) / clk;
-
-	/*
-	**	Why not to try the immediate lower divisor and to choose
-	**	the one that allows the fastest output speed ?
-	**	We don't want input speed too much greater than output speed.
-	*/
-	if (div >= 1 && fak < 8) {
-		u_long fak2, per2;
-		fak2 = (kpc - 1) / div_10M[div-1] + 1;
-		per2 = (fak2 * div_10M[div-1]) / clk;
-		if (per2 < per && fak2 <= 8) {
-			fak = fak2;
-			per = per2;
-			--div;
-		}
-	}
-#endif
 
	if (fak < 4) fak = 4;	/* Should never happen, too bad ... */
 
	/*
@@ -5511,10 +5344,6 @@ static void ncr_set_sync_wide_status (struct ncb *np, u_char target)
	for (cp = np->ccb; cp; cp = cp->link_ccb) {
		if (!cp->cmd) continue;
		if (scmd_id(cp->cmd) != target) continue;
-#if 0
-		cp->sync_status = tp->sval;
-		cp->wide_status = tp->wval;
-#endif
		cp->phys.select.sel_scntl3 = tp->wval;
		cp->phys.select.sel_sxfer = tp->sval;
	}
@@ -8125,7 +7954,7 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
	 */
 
	spin_lock_irqsave(&np->smp_lock, flags);
-	sts = ncr_reset_bus(np, cmd, 1);
+	sts = ncr_reset_bus(np);
 
	done_list     = np->done_list;
	np->done_list = NULL;
@@ -8136,30 +7965,6 @@ static int ncr53c8xx_bus_reset(struct scsi_cmnd *cmd)
	return sts;
 }
 
-#if 0 /* unused and broken */
-static int ncr53c8xx_abort(struct scsi_cmnd *cmd)
-{
-	struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
-	int sts;
-	unsigned long flags;
-	struct scsi_cmnd *done_list;
-
-	printk("ncr53c8xx_abort\n");
-
-	NCR_LOCK_NCB(np, flags);
-
-	sts = ncr_abort_command(np, cmd);
-out:
-	done_list     = np->done_list;
-	np->done_list = NULL;
-	NCR_UNLOCK_NCB(np, flags);
-
-	ncr_flush_done_cmds(done_list);
-
-	return sts;
-}
-#endif
 
 
 /*
 **	Scsi command waiting list management.

drivers/scsi/pcmcia/fdomain_cs.c
@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
		goto fail_disable;
 
	if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
-			    "fdomain_cs"))
+			    "fdomain_cs")) {
+		ret = -EBUSY;
		goto fail_disable;
+	}
 
	sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
	if (!sh) {

drivers/scsi/pm8001/pm8001_hwi.c
@@ -1323,7 +1323,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
	void *pMessage;
	unsigned long flags;
	int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
-	int rv = -1;
+	int rv;
 
	WARN_ON(q_index >= PM8001_MAX_INB_NUM);
	spin_lock_irqsave(&circularQ->iq_lock, flags);

drivers/scsi/qedf/qedf_io.c
@@ -1162,13 +1162,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
		return;
	}
 
-	if (!sc_cmd->request) {
-		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
-		    "sc_cmd=%p.\n", sc_cmd);
-		return;
-	}
-
-	if (!sc_cmd->request->q) {
+	if (!scsi_cmd_to_rq(sc_cmd)->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		    "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;

drivers/scsi/qedf/qedf_main.c
@@ -3004,7 +3004,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
 {
	u32 *list;
	int i;
-	int status = 0, rc;
+	int status;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;
@@ -3016,7 +3016,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
	 */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
-		return 1;
+		return -ENOMEM;
	}
 
	/*
@@ -3024,7 +3024,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
	 * addresses of our queues
	 */
	if (!qedf->p_cpuq) {
-		status = 1;
+		status = -EINVAL;
		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
		goto mem_alloc_failure;
	}
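The qedf hunks around this point replace ad-hoc "return 1" / "status = 1" failures with negative errno values, so the probe path can propagate and report the actual cause. A toy illustration of the difference (names invented; plain C):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int alloc_queues(int num_queues, void *p_cpuq)
{
	if (!num_queues)
		return -ENOMEM;	/* was: return 1 - cause lost */
	if (!p_cpuq)
		return -EINVAL;
	return 0;
}

int main(void)
{
	int rc = alloc_queues(0, NULL);

	if (rc)
		printf("alloc failed: %s\n", strerror(-rc));
	return 0;
}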
@@ -3040,8 +3040,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
	    "qedf->global_queues=%p.\n", qedf->global_queues);
 
	/* Allocate DMA coherent buffers for BDQ */
-	rc = qedf_alloc_bdq(qedf);
-	if (rc) {
+	status = qedf_alloc_bdq(qedf);
+	if (status) {
		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
		goto mem_alloc_failure;
	}

drivers/scsi/qedi/qedi_fw.c
@@ -609,14 +609,7 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
		goto error;
	}
 
-	if (!sc_cmd->request) {
-		QEDI_WARN(&qedi->dbg_ctx,
-			  "sc_cmd->request is NULL, sc_cmd=%p.\n",
-			  sc_cmd);
-		goto error;
-	}
-
-	if (!sc_cmd->request->q) {
+	if (!scsi_cmd_to_rq(sc_cmd)->q) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "request->q is NULL so request is not valid, sc_cmd=%p.\n",
			  sc_cmd);
@@ -936,17 +929,11 @@ void qedi_fp_process_cqes(struct qedi_work *work)
 
 static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
 {
-	struct iscsi_db_data dbell = { 0 };
-
-	dbell.agg_flags = 0;
-
-	dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
-	dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
-	dbell.params |=
-		DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
-
-	dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
+	qedi_conn->ep->db_data.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
 
	/* wmb - Make sure fw idx is coherent */
	wmb();
-	writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+	writel(*(u32 *)&qedi_conn->ep->db_data, qedi_conn->ep->p_doorbell);
 
	/* Make sure fw write idx is coherent, and include both memory barriers
	 * as a failsafe as for some architectures the call is the same but on
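The qedi change moves the doorbell payload into the persistent ep->db_data (prepared once at connection setup) and keeps a write barrier between publishing the producer index and ringing the doorbell. A userspace model of that publish-then-ring ordering (illustrative only; a plain variable and C11 fences stand in for the MMIO register and the kernel's wmb()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct doorbell { uint16_t sq_prod; uint16_t params; };

/* persistent copy, like ep->db_data */
static _Alignas(uint32_t) struct doorbell db_data;
static uint32_t fake_register;              /* stands in for the MMIO doorbell */
static volatile uint32_t *p_doorbell = &fake_register;

static void ring_doorbell(uint16_t fw_sq_prod_idx)
{
	db_data.sq_prod = fw_sq_prod_idx;

	/* order the index update before the doorbell write (kernel: wmb()) */
	atomic_thread_fence(memory_order_release);
	*p_doorbell = *(uint32_t *)&db_data;

	/* second fence as a failsafe, mirroring the driver's comment */
	atomic_thread_fence(memory_order_seq_cst);
}

int main(void)
{
	ring_doorbell(42);
	printf("doorbell register now holds 0x%08x\n", fake_register);
	return 0;
}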
Some files were not shown because too many files have changed in this diff