mirror of https://gitee.com/openkylin/linux.git
SCSI misc on 20181224
This is mostly update of the usual drivers: smartpqi, lpfc, qedi, megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas. Additionally, we have a pile of annotation, unused variable and minor updates. The big API change is the updates for Christoph's DMA rework which include removing the DISABLE_CLUSTERING flag. And finally there are a couple of target tree updates.

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----
iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCXCEUNiYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishdjKAP9vrTTv
qFaYmAoRSbPq9ZiixaXLMy0K/6o76Uay0gnBqgD/fgn3jg/KQ6alNaCjmfeV3wAj
u1j3H7tha9j1it+4pUw=
=GDa+
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: smartpqi, lpfc, qedi,
  megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas. Additionally, we have
  a pile of annotation, unused variable and minor updates. The big API
  change is the updates for Christoph's DMA rework which include
  removing the DISABLE_CLUSTERING flag. And finally there are a couple
  of target tree updates"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (259 commits)
  scsi: isci: request: mark expected switch fall-through
  scsi: isci: remote_node_context: mark expected switch fall-throughs
  scsi: isci: remote_device: Mark expected switch fall-throughs
  scsi: isci: phy: Mark expected switch fall-through
  scsi: iscsi: Capture iscsi debug messages using tracepoints
  scsi: myrb: Mark expected switch fall-throughs
  scsi: megaraid: fix out-of-bound array accesses
  scsi: mpt3sas: mpt3sas_scsih: Mark expected switch fall-through
  scsi: fcoe: remove set but not used variable 'port'
  scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown()
  scsi: smartpqi: fix build warnings
  scsi: smartpqi: update driver version
  scsi: smartpqi: add ofa support
  scsi: smartpqi: increase fw status register read timeout
  scsi: smartpqi: bump driver version
  scsi: smartpqi: add smp_utils support
  scsi: smartpqi: correct lun reset issues
  scsi: smartpqi: correct volume status
  scsi: smartpqi: do not offline disks for transient did no connect conditions
  scsi: smartpqi: allow for larger raid maps
  ...
This commit is contained in commit 938edb8a31.
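The DISABLE_CLUSTERING removal mentioned above recurs throughout the driver hunks below: the .use_clustering field is dropped from struct scsi_host_template, and a driver that relied on it to prevent scatter/gather segment merging sets .dma_boundary instead (see the simscsi hunk). A minimal sketch of the before/after pattern — the field names come from the diff itself; the template name and other values are illustrative only:

	#include <linux/module.h>	/* THIS_MODULE */
	#include <scsi/scsi_host.h>	/* struct scsi_host_template */

	/*
	 * Hedged sketch of the migration pattern, not any one driver's code.
	 * Before the rework a driver wrote:
	 *	.use_clustering = DISABLE_CLUSTERING,
	 * After it, that field no longer exists; a driver that must not see
	 * merged segments expresses the same constraint via dma_boundary:
	 */
	static struct scsi_host_template example_template = {
		.module       = THIS_MODULE,
		.name         = "example",	/* illustrative name */
		.sg_tablesize = 128,		/* illustrative value */
		.dma_boundary = PAGE_SIZE - 1,	/* keep each segment inside one page */
	};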
@@ -0,0 +1,31 @@
+* Cadence Universal Flash Storage (UFS) Controller
+
+UFS nodes are defined to describe on-chip UFS host controllers.
+Each UFS controller instance should have its own node.
+Please see the ufshcd-pltfrm.txt for a list of all available properties.
+
+Required properties:
+- compatible : Compatible list, contains the following controller:
+			"cdns,ufshc"
+		complemented with the JEDEC version:
+			"jedec,ufs-2.0"
+
+- reg : Address and length of the UFS register set.
+- interrupts : One interrupt mapping.
+- freq-table-hz : Clock frequency table.
+		  See the ufshcd-pltfrm.txt for details.
+- clocks : List of phandle and clock specifier pairs.
+- clock-names : List of clock input name strings sorted in the same
+		order as the clocks property. "core_clk" is mandatory.
+		Depending on a type of a PHY,
+		the "phy_clk" clock can also be added, if needed.
+
+Example:
+	ufs@fd030000 {
+		compatible = "cdns,ufshc", "jedec,ufs-2.0";
+		reg = <0xfd030000 0x10000>;
+		interrupts = <0 1 IRQ_TYPE_LEVEL_HIGH>;
+		freq-table-hz = <0 0>, <0 0>;
+		clocks = <&ufs_core_clk>, <&ufs_phy_clk>;
+		clock-names = "core_clk", "phy_clk";
+	};
@@ -33,6 +33,12 @@ Optional properties:
 - clocks : List of phandle and clock specifier pairs
 - clock-names : List of clock input name strings sorted in the same
 		order as the clocks property.
+		"ref_clk" indicates reference clock frequency.
+		UFS host supplies reference clock to UFS device and UFS device
+		specification allows host to provide one of the 4 frequencies (19.2 MHz,
+		26 MHz, 38.4 MHz, 52MHz) for reference clock. This "ref_clk" entry is
+		parsed and used to update the reference clock setting in device.
+		Defaults to 26 MHz(as per specification) if not specified by host.
 - freq-table-hz : Array of <min max> operating frequencies stored in the same
 		  order as the clocks property. If this property is not
 		  defined or a value in the array is "0" then it is assumed
@@ -1098,8 +1098,6 @@ of interest:
   unchecked_isa_dma - 1=>only use bottom 16 MB of ram (ISA DMA addressing
                       restriction), 0=>can use full 32 bit (or better) DMA
                       address space
-  use_clustering - 1=>SCSI commands in mid level's queue can be merged,
-                   0=>disallow SCSI command merging
   no_async_abort - 1=>Asynchronous aborts are not supported
                    0=>Timed-out commands will be aborted asynchronously
   hostt - pointer to driver's struct scsi_host_template from which
@@ -347,7 +347,7 @@ static struct scsi_host_template driver_template = {
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 1024,
	.cmd_per_lun		= SIMSCSI_REQ_QUEUE_LEN,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 static int __init
@@ -195,7 +195,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
			goto split;
		}
 
-		if (bvprvp && blk_queue_cluster(q)) {
+		if (bvprvp) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!biovec_phys_mergeable(q, bvprvp, &bv))
@@ -295,7 +295,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     bool no_sg_merge)
 {
	struct bio_vec bv, bvprv = { NULL };
-	int cluster, prev = 0;
+	int prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;
@@ -313,7 +313,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
	}
 
	fbio = bio;
-	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
@@ -325,7 +324,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
			if (no_sg_merge)
				goto new_segment;
 
-			if (prev && cluster) {
+			if (prev) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
@@ -395,9 +394,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 {
	struct bio_vec end_bv = { NULL }, nxt_bv;
 
-	if (!blk_queue_cluster(q))
-		return 0;
-
	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;
@@ -414,12 +410,12 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
-		     struct scatterlist **sg, int *nsegs, int *cluster)
+		     struct scatterlist **sg, int *nsegs)
 {
 
	int nbytes = bvec->bv_len;
 
-	if (*sg && *cluster) {
+	if (*sg) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;
		if (!biovec_phys_mergeable(q, bvprv, bvec))
@@ -465,12 +461,12 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 {
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
-	int cluster = blk_queue_cluster(q), nsegs = 0;
+	int nsegs = 0;
 
	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
-					     &nsegs, &cluster);
+					     &nsegs);
 
	return nsegs;
 }
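The block-layer hunks above all delete the same guard: with clustering gone, blk_queue_cluster(q) no longer gates segment merging, and only the remaining checks (segment-size limit plus physical mergeability) decide whether two bio_vecs share a scatter/gather segment. A condensed sketch of the post-change merge condition — can_merge_bvecs() is a hypothetical helper name; the functions it calls are the real ones visible in the hunks:

	/* Sketch: bvprv/bv are adjacent bio_vecs considered for one segment. */
	static bool can_merge_bvecs(struct request_queue *q,
				    struct bio_vec *bvprv, struct bio_vec *bv,
				    unsigned int seg_size)
	{
		/* no blk_queue_cluster(q) test any more */
		if (seg_size + bv->bv_len > queue_max_segment_size(q))
			return false;	/* would exceed the segment size limit */
		return biovec_phys_mergeable(q, bvprv, bv);
	}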
@@ -56,7 +56,6 @@ void blk_set_default_limits(struct queue_limits *lim)
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
-	lim->cluster = 1;
	lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
@@ -547,8 +546,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
-	t->cluster &= b->cluster;
-
	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
@@ -132,10 +132,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (blk_queue_cluster(q))
-		return queue_var_show(queue_max_segment_size(q), (page));
-
-	return queue_var_show(PAGE_SIZE, (page));
+	return queue_var_show(queue_max_segment_size(q), (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -1610,7 +1610,6 @@ static struct scsi_host_template scsi_driver_template = {
	.eh_abort_handler	= sbp2_scsi_abort,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
	.can_queue		= 1,
	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
 };
@@ -997,7 +997,6 @@ static struct scsi_host_template iscsi_iser_sht = {
	.eh_device_reset_handler= iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc		= iscsi_target_alloc,
-	.use_clustering		= ENABLE_CLUSTERING,
	.slave_alloc		= iscsi_iser_slave_alloc,
	.proc_name		= "iscsi_iser",
	.this_id		= -1,
@@ -3215,7 +3215,6 @@ static struct scsi_host_template srp_template = {
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
-	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
 };
@@ -3147,11 +3147,6 @@ static int srpt_check_false(struct se_portal_group *se_tpg)
	return 0;
 }
 
-static char *srpt_get_fabric_name(void)
-{
-	return "srpt";
-}
-
 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
 {
	return tpg->se_tpg_wwn->priv;
@@ -3678,8 +3673,7 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
 
 static const struct target_core_fabric_ops srpt_template = {
	.module				= THIS_MODULE,
-	.name				= "srpt",
-	.get_fabric_name		= srpt_get_fabric_name,
+	.fabric_name			= "srpt",
	.tpg_get_wwn			= srpt_get_fabric_wwn,
	.tpg_get_tag			= srpt_get_tag,
	.tpg_check_demo_mode		= srpt_check_false,
@@ -129,7 +129,6 @@ static struct scsi_host_template mptfc_driver_template = {
	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
	.max_sectors			= 8192,
	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mptscsih_host_attrs,
 };
 
@@ -1992,7 +1992,6 @@ static struct scsi_host_template mptsas_driver_template = {
	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
	.max_sectors			= 8192,
	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mptscsih_host_attrs,
	.no_write_same			= 1,
 };
@@ -848,7 +848,6 @@ static struct scsi_host_template mptspi_driver_template = {
	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
	.max_sectors			= 8192,
	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mptscsih_host_attrs,
 };
 
@@ -4,7 +4,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2017
  */
 
 /*
@@ -124,6 +124,9 @@ static int __init zfcp_module_init(void)
 {
	int retval = -ENOMEM;
 
+	if (zfcp_experimental_dix)
+		pr_warn("DIX is enabled. It is experimental and might cause problems\n");
+
	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
						  sizeof(struct fsf_qtcb));
	if (!zfcp_fsf_qtcb_cache)
@@ -248,43 +251,36 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 
 static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	if (adapter->pool.erp_req)
-		mempool_destroy(adapter->pool.erp_req);
-	if (adapter->pool.scsi_req)
-		mempool_destroy(adapter->pool.scsi_req);
-	if (adapter->pool.scsi_abort)
-		mempool_destroy(adapter->pool.scsi_abort);
-	if (adapter->pool.qtcb_pool)
-		mempool_destroy(adapter->pool.qtcb_pool);
-	if (adapter->pool.status_read_req)
-		mempool_destroy(adapter->pool.status_read_req);
-	if (adapter->pool.sr_data)
-		mempool_destroy(adapter->pool.sr_data);
-	if (adapter->pool.gid_pn)
-		mempool_destroy(adapter->pool.gid_pn);
+	mempool_destroy(adapter->pool.erp_req);
+	mempool_destroy(adapter->pool.scsi_req);
+	mempool_destroy(adapter->pool.scsi_abort);
+	mempool_destroy(adapter->pool.qtcb_pool);
+	mempool_destroy(adapter->pool.status_read_req);
+	mempool_destroy(adapter->pool.sr_data);
+	mempool_destroy(adapter->pool.gid_pn);
 }
 
 /**
  * zfcp_status_read_refill - refill the long running status_read_requests
  * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
  *
- * Returns: 0 on success, 1 otherwise
- *
- * if there are 16 or more status_read requests missing an adapter_reopen
- * is triggered
+ * Return:
+ * * 0 on success meaning at least one status read is pending
+ * * 1 if posting failed and not a single status read buffer is pending,
+ *     also triggers adapter reopen recovery
  */
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
-	while (atomic_read(&adapter->stat_miss) > 0)
+	while (atomic_add_unless(&adapter->stat_miss, -1, 0))
		if (zfcp_fsf_status_read(adapter->qdio)) {
+			atomic_inc(&adapter->stat_miss); /* undo add -1 */
			if (atomic_read(&adapter->stat_miss) >=
			    adapter->stat_read_buf_num) {
				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
				return 1;
			}
			break;
-		} else
-			atomic_dec(&adapter->stat_miss);
+		}
	return 0;
 }
 
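The reworked refill loop above replaces a read-then-decrement pair with atomic_add_unless(&adapter->stat_miss, -1, 0), which atomically claims one missing status read (never letting the counter go below zero) and undoes the claim with atomic_inc() when posting fails. A minimal, simplified userspace sketch of that claim-then-undo pattern, with C11 atomics standing in for the kernel's atomic_t and a stubbed-out post function (the real driver additionally checks how many reads are still missing before triggering recovery):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int stat_miss;		/* status reads still to be posted */

	static bool post_status_read(void)
	{
		return true;	/* stub: pretend posting always succeeds */
	}

	/* atomic_add_unless(v, -1, 0): decrement v unless it is already 0;
	 * returns true when the decrement actually happened. */
	static bool dec_unless_zero(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old > 0) {
			if (atomic_compare_exchange_weak(v, &old, old - 1))
				return true;	/* claimed one unit */
			/* old was refreshed by the failed CAS; retry */
		}
		return false;
	}

	static int status_read_refill(void)
	{
		while (dec_unless_zero(&stat_miss)) {
			if (!post_status_read()) {
				atomic_fetch_add(&stat_miss, 1); /* undo, like atomic_inc() */
				return 1;
			}
		}
		return 0;
	}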
@@ -542,45 +538,3 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
 }
-
-/**
- * zfcp_sg_free_table - free memory used by scatterlists
- * @sg: pointer to scatterlist
- * @count: number of scatterlist which are to be free'ed
- * the scatterlist are expected to reference pages always
- */
-void zfcp_sg_free_table(struct scatterlist *sg, int count)
-{
-	int i;
-
-	for (i = 0; i < count; i++, sg++)
-		if (sg)
-			free_page((unsigned long) sg_virt(sg));
-		else
-			break;
-}
-
-/**
- * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
- * @sg: pointer to struct scatterlist
- * @count: number of scatterlists which should be assigned with buffers
- * of size page
- *
- * Returns: 0 on success, -ENOMEM otherwise
- */
-int zfcp_sg_setup_table(struct scatterlist *sg, int count)
-{
-	void *addr;
-	int i;
-
-	sg_init_table(sg, count);
-	for (i = 0; i < count; i++, sg++) {
-		addr = (void *) get_zeroed_page(GFP_KERNEL);
-		if (!addr) {
-			zfcp_sg_free_table(sg, i);
-			return -ENOMEM;
-		}
-		sg_set_buf(sg, addr, PAGE_SIZE);
-	}
-	return 0;
-}
@@ -63,7 +63,8 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
 
 /**
  * zfcp_dbf_hba_fsf_res - trace event for fsf responses
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of FSF response has been received
+ * @level: trace level to be used for event
  * @req: request for which a response was received
  */
 void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
@@ -81,8 +82,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
-	rec->fsf_seq_no = req->seq_no;
+	rec->fsf_cmd = q_head->fsf_command;
+	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
@@ -94,7 +95,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);
 
-	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+	if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
		rec->pl_len = q_head->log_length;
		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
				  rec->pl_len, "fsf_res", req->req_id);
@@ -127,7 +128,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
 
	if (!srb)
		goto log;
@@ -153,7 +154,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
 
 /**
  * zfcp_dbf_hba_bit_err - trace event for bit error conditions
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of bit error unsolicited status was received
  * @req: request which caused the bit_error condition
  */
 void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
@@ -174,7 +175,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));
 
@@ -224,6 +225,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 
 /**
  * zfcp_dbf_hba_basic - trace event for basic adapter events
  * @tag: identifier for event
+ * @adapter: pointer to struct zfcp_adapter
  */
 void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
@@ -357,7 +359,7 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
-	rec->u.run.rec_action = erp->action;
+	rec->u.run.rec_action = erp->type;
 
	if (erp->sdev)
		rec->u.run.rec_count =
@@ -478,7 +480,8 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
 /**
  * zfcp_dbf_san_req - trace event for issued SAN request
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
- * d_id: destination ID
+ * @fsf: request containing issued CT or ELS data
+ * @d_id: N_Port_ID where SAN request is sent to
  */
 void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
@@ -560,7 +563,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
 /**
  * zfcp_dbf_san_res - trace event for received SAN request
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received CT or ELS data
  */
 void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
 {
@@ -580,7 +583,7 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
 /**
  * zfcp_dbf_san_in_els - trace event for incoming ELS
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received ELS data
  */
 void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 {
@@ -42,7 +42,8 @@ struct zfcp_dbf_rec_trigger {
  * @fsf_req_id: request id for fsf requests
  * @rec_status: status of the fsf request
  * @rec_step: current step of the recovery action
- * rec_count: recovery counter
+ * @rec_action: ERP action type
+ * @rec_count: recoveries including retries for particular @rec_action
  */
 struct zfcp_dbf_rec_running {
	u64 fsf_req_id;
@@ -72,6 +73,7 @@ enum zfcp_dbf_rec_id {
  * @adapter_status: current status of the adapter
  * @port_status: current status of the port
  * @lun_status: current status of the lun
+ * @u: record type specific data
  * @u.trig: structure zfcp_dbf_rec_trigger
  * @u.run: structure zfcp_dbf_rec_running
  */
@@ -126,6 +128,8 @@ struct zfcp_dbf_san {
  * @prot_status_qual: protocol status qualifier
  * @fsf_status: fsf status
  * @fsf_status_qual: fsf status qualifier
+ * @port_handle: handle for port
+ * @lun_handle: handle for LUN
  */
 struct zfcp_dbf_hba_res {
	u64 req_issued;
@@ -158,6 +162,7 @@ struct zfcp_dbf_hba_uss {
  * @ZFCP_DBF_HBA_RES: response trace record
  * @ZFCP_DBF_HBA_USS: unsolicited status trace record
  * @ZFCP_DBF_HBA_BIT: bit error trace record
+ * @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
  */
 enum zfcp_dbf_hba_id {
	ZFCP_DBF_HBA_RES = 1,
@@ -176,6 +181,9 @@ enum zfcp_dbf_hba_id {
  * @fsf_seq_no: fsf sequence number
  * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @u: record type specific data
+ * @u.res: data for fsf responses
+ * @u.uss: data for unsolicited status buffer
  * @u.be: data for bit error unsolicited status buffer
  */
 struct zfcp_dbf_hba {
	u8 id;
@@ -339,8 +347,8 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
				      zfcp_dbf_hba_fsf_resp_suppress(req)
				      ? 5 : 1, req);
 
-	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
-		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+	} else if ((qtcb->header.fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+		   (qtcb->header.fsf_command == FSF_QTCB_OPEN_LUN)) {
		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
 
	} else if (qtcb->header.log_length) {
@@ -4,7 +4,7 @@
  *
  * Global definitions for the zfcp device driver.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #ifndef ZFCP_DEF_H
@@ -41,24 +41,16 @@
 #include "zfcp_fc.h"
 #include "zfcp_qdio.h"
 
 struct zfcp_reqlist;
 
-/********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT                    (10*HZ)
-
 /********************* FSF SPECIFIC DEFINES *********************************/
 
 /* ATTENTION: value must not be used by hardware */
 #define FSF_QTCB_UNSOLICITED_STATUS		0x6305
 
-/* timeout value for "default timer" for fsf requests */
-#define ZFCP_FSF_REQUEST_TIMEOUT  (60*HZ)
-
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
 
 /*
- * Note, the leftmost status byte is common among adapter, port
- * and unit
+ * Note, the leftmost 12 status bits (3 nibbles) are common among adapter, port
+ * and unit. This is a mask for bitwise 'and' with status values.
  */
 #define ZFCP_COMMON_FLAGS			0xfff00000
@@ -97,7 +89,49 @@ struct zfcp_reqlist;
 
 /************************* STRUCTURE DEFINITIONS *****************************/
 
 struct zfcp_fsf_req;
+
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ *
+ * Values must fit into u8 because of code dependencies:
+ * zfcp_dbf_rec_trig(), &zfcp_dbf_rec_trigger.want, &zfcp_dbf_rec_trigger.need;
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), &zfcp_dbf_rec_running.rec_action.
+ */
+enum zfcp_erp_act_type {
+	ZFCP_ERP_ACTION_REOPEN_LUN	   = 1,
+	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
+	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+	ZFCP_ERP_ACTION_REOPEN_ADAPTER	   = 4,
+};
+
+/*
+ * Values must fit into u16 because of code dependencies:
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), zfcp_dbf_rec_run_wka(),
+ * &zfcp_dbf_rec_running.rec_step.
+ */
+enum zfcp_erp_steps {
+	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
+	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
+	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
+	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
+	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
+	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
+};
+
+struct zfcp_erp_action {
+	struct list_head list;
+	enum zfcp_erp_act_type type;  /* requested action code */
+	struct zfcp_adapter *adapter; /* device which should be recovered */
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	u32		status;	      /* recovery status */
+	enum zfcp_erp_steps	step; /* active step of this erp action */
+	unsigned long		fsf_req_id;
+	struct timer_list timer;
+};
 
 /* holds various memory pools of an adapter */
 struct zfcp_adapter_mempool {
@@ -111,37 +145,6 @@ struct zfcp_adapter_mempool {
	mempool_t *qtcb_pool;
 };
 
-struct zfcp_erp_action {
-	struct list_head list;
-	int action;	              /* requested action code */
-	struct zfcp_adapter *adapter; /* device which should be recovered */
-	struct zfcp_port *port;
-	struct scsi_device *sdev;
-	u32		status;	      /* recovery status */
-	u32		step;	      /* active step of this erp action */
-	unsigned long		fsf_req_id;
-	struct timer_list timer;
-};
-
-struct fsf_latency_record {
-	u32 min;
-	u32 max;
-	u64 sum;
-};
-
-struct latency_cont {
-	struct fsf_latency_record channel;
-	struct fsf_latency_record fabric;
-	u64 counter;
-};
-
-struct zfcp_latencies {
-	struct latency_cont read;
-	struct latency_cont write;
-	struct latency_cont cmd;
-	spinlock_t lock;
-};
-
 struct zfcp_adapter {
	struct kref ref;
	u64 peer_wwnn;		/* P2P peer WWNN */
@@ -220,6 +223,25 @@ struct zfcp_port {
	unsigned int starget_id;
 };
 
+struct zfcp_latency_record {
+	u32 min;
+	u32 max;
+	u64 sum;
+};
+
+struct zfcp_latency_cont {
+	struct zfcp_latency_record channel;
+	struct zfcp_latency_record fabric;
+	u64 counter;
+};
+
+struct zfcp_latencies {
+	struct zfcp_latency_cont read;
+	struct zfcp_latency_cont write;
+	struct zfcp_latency_cont cmd;
+	spinlock_t lock;
+};
+
 /**
  * struct zfcp_unit - LUN configured via zfcp sysfs
  * @dev: struct device for sysfs representation and reference counting
@@ -287,9 +309,7 @@ static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
  * @qdio_req: qdio queue related values
  * @completion: used to signal the completion of the request
  * @status: status of the request
- * @fsf_command: FSF command issued
  * @qtcb: associated QTCB
- * @seq_no: sequence number of this request
  * @data: private data
  * @timer: timer data of this request
  * @erp_action: reference to erp action if request issued on behalf of ERP
@@ -304,9 +324,7 @@ struct zfcp_fsf_req {
	struct zfcp_qdio_req	qdio_req;
	struct completion	completion;
	u32			status;
-	u32			fsf_command;
	struct fsf_qtcb		*qtcb;
-	u32			seq_no;
	void			*data;
	struct timer_list	timer;
	struct zfcp_erp_action	*erp_action;
@@ -321,4 +339,9 @@ int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
	return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
 }
 
+static inline bool zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
+{
+	return req->qtcb == NULL;
+}
+
 #endif /* ZFCP_DEF_H */
@@ -4,7 +4,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -24,38 +24,18 @@ enum zfcp_erp_act_flags {
	ZFCP_STATUS_ERP_NO_REF		= 0x00800000,
 };
 
-enum zfcp_erp_steps {
-	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
-	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
-	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
-	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
-	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
-	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
-};
-
-/**
- * enum zfcp_erp_act_type - Type of ERP action object.
- * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
- * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
- * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
- *			  either of the first four enum values.
- *			  Used to indicate that an ERP action could not be
- *			  set up despite a detected need for some recovery.
- * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
- *			    either of the first four enum values.
- *			    Used to indicate that ERP not needed because
- *			    the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that an ERP action could not be set up despite a detected
+ * need for some recovery.
  */
-enum zfcp_erp_act_type {
-	ZFCP_ERP_ACTION_REOPEN_LUN	   = 1,
-	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
-	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
-	ZFCP_ERP_ACTION_REOPEN_ADAPTER	   = 4,
-	ZFCP_ERP_ACTION_NONE		   = 0xc0,
-	ZFCP_ERP_ACTION_FAILED		   = 0xe0,
-};
+#define ZFCP_ERP_ACTION_NONE		0xc0
+/*
+ * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
+ * Used to indicate that ERP not needed because the object has
+ * ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
+#define ZFCP_ERP_ACTION_FAILED		0xe0
 
 enum zfcp_erp_act_result {
	ZFCP_ERP_SUCCEEDED = 0,
@@ -136,11 +116,11 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
	}
 }
 
-static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
-				  struct zfcp_port *port,
-				  struct scsi_device *sdev)
+static enum zfcp_erp_act_type zfcp_erp_handle_failed(
+	enum zfcp_erp_act_type want, struct zfcp_adapter *adapter,
+	struct zfcp_port *port, struct scsi_device *sdev)
 {
-	int need = want;
+	enum zfcp_erp_act_type need = want;
	struct zfcp_scsi_dev *zsdev;
 
	switch (want) {
@@ -171,19 +151,17 @@ static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
					adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
		}
		break;
-	default:
-		need = 0;
-		break;
	}
 
	return need;
 }
 
-static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
+				 struct zfcp_adapter *adapter,
				 struct zfcp_port *port,
				 struct scsi_device *sdev)
 {
-	int need = want;
+	enum zfcp_erp_act_type need = want;
	int l_status, p_status, a_status;
	struct zfcp_scsi_dev *zfcp_sdev;
 
@@ -230,7 +208,8 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
	return need;
 }
 
-static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
+						  u32 act_status,
						  struct zfcp_adapter *adapter,
						  struct zfcp_port *port,
						  struct scsi_device *sdev)
@@ -278,9 +257,6 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
				      ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;
-
-	default:
-		return NULL;
	}
 
	WARN_ON_ONCE(erp_action->adapter != adapter);
@@ -288,18 +264,19 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
	memset(&erp_action->timer, 0, sizeof(erp_action->timer));
	erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
	erp_action->fsf_req_id = 0;
-	erp_action->action = need;
+	erp_action->type = need;
	erp_action->status = act_status;
 
	return erp_action;
 }
 
-static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+static void zfcp_erp_action_enqueue(enum zfcp_erp_act_type want,
+				    struct zfcp_adapter *adapter,
				    struct zfcp_port *port,
				    struct scsi_device *sdev,
-				    char *id, u32 act_status)
+				    char *dbftag, u32 act_status)
 {
-	int need;
+	enum zfcp_erp_act_type need;
	struct zfcp_erp_action *act;
 
	need = zfcp_erp_handle_failed(want, adapter, port, sdev);
@@ -327,10 +304,11 @@ static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
	list_add_tail(&act->list, &adapter->erp_ready_head);
	wake_up(&adapter->erp_ready_wq);
 out:
-	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
+	zfcp_dbf_rec_trig(dbftag, adapter, port, sdev, want, need);
 }
 
-void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
+				      struct zfcp_adapter *adapter,
				      u64 port_name, u32 port_id)
 {
	unsigned long flags;
@@ -344,29 +322,30 @@ void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
	atomic_set(&tmpport.status, -1); /* unknown */
	tmpport.wwpn = port_name;
	tmpport.d_id = port_id;
-	zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+	zfcp_dbf_rec_trig(dbftag, adapter, &tmpport, NULL,
			  ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
			  ZFCP_ERP_ACTION_NONE);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
-				     int clear_mask, char *id)
+				     int clear_mask, char *dbftag)
 {
	zfcp_erp_adapter_block(adapter, clear_mask);
	zfcp_scsi_schedule_rports_block(adapter);
 
	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-				adapter, NULL, NULL, id, 0);
+				adapter, NULL, NULL, dbftag, 0);
 }
 
 /**
  * zfcp_erp_adapter_reopen - Reopen adapter.
  * @adapter: Adapter to reopen.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
+			     char *dbftag)
 {
	unsigned long flags;
 
@@ -375,7 +354,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
 
	write_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
-				NULL, NULL, id, 0);
+				NULL, NULL, dbftag, 0);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -383,25 +362,25 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
  * zfcp_erp_adapter_shutdown - Shutdown adapter.
  * @adapter: Adapter to shut down.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
 void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
-			       char *id)
+			       char *dbftag)
 {
	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_adapter_reopen(adapter, clear | flags, id);
+	zfcp_erp_adapter_reopen(adapter, clear | flags, dbftag);
 }
 
 /**
  * zfcp_erp_port_shutdown - Shutdown port
  * @port: Port to shut down.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *dbftag)
 {
	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_port_reopen(port, clear | flags, id);
+	zfcp_erp_port_reopen(port, clear | flags, dbftag);
 }
 
 static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -411,53 +390,55 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
 }
 
 static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
-					 char *id)
+					 char *dbftag)
 {
	zfcp_erp_port_block(port, clear);
	zfcp_scsi_schedule_rport_block(port);
 
	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-				port->adapter, port, NULL, id, 0);
+				port->adapter, port, NULL, dbftag, 0);
 }
 
 /**
  * zfcp_erp_port_forced_reopen - Forced close of port and open again
  * @port: Port to force close and to reopen.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+				 char *dbftag)
 {
	unsigned long flags;
	struct zfcp_adapter *adapter = port->adapter;
 
	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_port_forced_reopen(port, clear, id);
+	_zfcp_erp_port_forced_reopen(port, clear, dbftag);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+				  char *dbftag)
 {
	zfcp_erp_port_block(port, clear);
	zfcp_scsi_schedule_rport_block(port);
 
	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-				port->adapter, port, NULL, id, 0);
+				port->adapter, port, NULL, dbftag, 0);
 }
 
 /**
  * zfcp_erp_port_reopen - trigger remote port recovery
  * @port: port to recover
- * @clear_mask: flags in port status to be cleared
- * @id: Id for debug trace event.
+ * @clear: flags in port status to be cleared
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *dbftag)
 {
	unsigned long flags;
	struct zfcp_adapter *adapter = port->adapter;
 
	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_port_reopen(port, clear, id);
+	_zfcp_erp_port_reopen(port, clear, dbftag);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -467,8 +448,8 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
				  ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
 }
 
-static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-				 u32 act_status)
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear,
+				 char *dbftag, u32 act_status)
 {
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -476,18 +457,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
	zfcp_erp_lun_block(sdev, clear);
 
	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
-				zfcp_sdev->port, sdev, id, act_status);
+				zfcp_sdev->port, sdev, dbftag, act_status);
 }
 
 /**
  * zfcp_erp_lun_reopen - initiate reopen of a LUN
  * @sdev: SCSI device / LUN to be reopened
- * @clear_mask: specifies flags in LUN status to be cleared
- * @id: Id for debug trace event.
+ * @clear: specifies flags in LUN status to be cleared
+ * @dbftag: Tag for debug trace event.
  *
  * Return: 0 on success, < 0 on error
  */
-void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *dbftag)
 {
	unsigned long flags;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -503,25 +484,25 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
  * zfcp_erp_lun_shutdown - Shutdown LUN
  * @sdev: SCSI device / LUN to shut down.
  * @clear: Status flags to clear.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  */
-void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *dbftag)
 {
	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_lun_reopen(sdev, clear | flags, id);
+	zfcp_erp_lun_reopen(sdev, clear | flags, dbftag);
 }
 
 /**
  * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
  * @sdev: SCSI device / LUN to shut down.
- * @id: Id for debug trace event.
+ * @dbftag: Tag for debug trace event.
  *
  * Do not acquire a reference for the LUN when creating the ERP
  * action. It is safe, because this function waits for the ERP to
  * complete first. This allows to shutdown the LUN, even when the SCSI
  * device is in the state SDEV_DEL when scsi_device_get will fail.
  */
-void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *dbftag)
 {
	unsigned long flags;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -619,7 +600,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
 
 /**
  * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
- * @data: ERP action (from timer data)
+ * @t: timer list entry embedded in zfcp FSF request
  */
 void zfcp_erp_timeout_handler(struct timer_list *t)
 {
@@ -644,31 +625,31 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
 }
 
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
-				      int clear, char *id)
+				      int clear, char *dbftag)
 {
	struct zfcp_port *port;
 
	read_lock(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
-		_zfcp_erp_port_reopen(port, clear, id);
+		_zfcp_erp_port_reopen(port, clear, dbftag);
	read_unlock(&adapter->port_list_lock);
 }
 
 static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
-				     char *id)
+				     char *dbftag)
 {
	struct scsi_device *sdev;
 
	spin_lock(port->adapter->scsi_host->host_lock);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
-			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+			_zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
	spin_unlock(port->adapter->scsi_host->host_lock);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
 {
-	switch (act->action) {
+	switch (act->type) {
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
		break;
@@ -686,7 +667,7 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
 
 static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
 {
-	switch (act->action) {
+	switch (act->type) {
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
		break;
@@ -696,6 +677,9 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
		break;
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		/* NOP */
+		break;
	}
 }
 
@@ -723,7 +707,8 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
	_zfcp_erp_port_reopen(port, 0, "ereptp1");
 }
 
-static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
+	struct zfcp_erp_action *erp_action)
 {
	int retries;
	int sleep = 1;
@@ -768,7 +753,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
+	struct zfcp_erp_action *act)
 {
	int ret;
	struct zfcp_adapter *adapter = act->adapter;
@@ -793,7 +779,8 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
+	struct zfcp_erp_action *act)
 {
	if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
		return ZFCP_ERP_FAILED;
@@ -832,7 +819,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 }
 
-static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open(
+	struct zfcp_erp_action *act)
 {
	struct zfcp_adapter *adapter = act->adapter;
 
@@ -853,7 +841,8 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_adapter_strategy(
+	struct zfcp_erp_action *act)
 {
	struct zfcp_adapter *adapter = act->adapter;
 
@@ -871,7 +860,8 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
	return ZFCP_ERP_SUCCEEDED;
 }
 
-static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy_close(
+	struct zfcp_erp_action *act)
 {
	int retval;
 
@@ -885,7 +875,8 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy(
+	struct zfcp_erp_action *erp_action)
 {
	struct zfcp_port *port = erp_action->port;
	int status = atomic_read(&port->status);
@@ -901,11 +892,19 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
		if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
			return ZFCP_ERP_SUCCEEDED;
+		break;
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_OPENING:
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		/* NOP */
+		break;
	}
	return ZFCP_ERP_FAILED;
 }
 
-static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_close(
+	struct zfcp_erp_action *erp_action)
 {
	int retval;
 
@@ -918,7 +917,8 @@ static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_port(
+	struct zfcp_erp_action *erp_action)
 {
	int retval;
 
@@ -944,7 +944,8 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
	return zfcp_erp_port_strategy_open_port(act);
 }
 
-static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_common(
+	struct zfcp_erp_action *act)
 {
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_port *port = act->port;
@@ -975,12 +976,18 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
			port->d_id = 0;
			return ZFCP_ERP_FAILED;
		}
-		/* fall through otherwise */
+		/* no early return otherwise, continue after switch case */
+		break;
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		/* NOP */
+		break;
	}
	return ZFCP_ERP_FAILED;
 }
 
-static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_port_strategy(
+	struct zfcp_erp_action *erp_action)
 {
	struct zfcp_port *port = erp_action->port;
	int p_status = atomic_read(&port->status);
@@ -999,6 +1006,12 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
		if (p_status & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_FAILED;
		break;
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_OPENING:
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		/* NOP */
+		break;
	}
 
 close_init_done:
@@ -1016,7 +1029,8 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
			  &zfcp_sdev->status);
 }
 
-static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_close(
+	struct zfcp_erp_action *erp_action)
 {
	int retval = zfcp_fsf_close_lun(erp_action);
	if (retval == -ENOMEM)
@@ -1027,7 +1041,8 @@ static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy_open(
+	struct zfcp_erp_action *erp_action)
 {
	int retval = zfcp_fsf_open_lun(erp_action);
	if (retval == -ENOMEM)
@@ -1038,7 +1053,8 @@ static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
	return ZFCP_ERP_CONTINUES;
 }
 
-static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
+static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
+	struct zfcp_erp_action *erp_action)
 {
	struct scsi_device *sdev = erp_action->sdev;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -1048,7 +1064,8 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
		zfcp_erp_lun_strategy_clearstati(sdev);
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return zfcp_erp_lun_strategy_close(erp_action);
-		/* already closed, fall through */
+		/* already closed */
+		/* fall through */
	case ZFCP_ERP_STEP_LUN_CLOSING:
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_FAILED;
@@ -1059,11 +1076,18 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
	case ZFCP_ERP_STEP_LUN_OPENING:
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_SUCCEEDED;
+		break;
+	case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_CLOSING:
+	case ZFCP_ERP_STEP_PORT_OPENING:
+		/* NOP */
+		break;
	}
	return ZFCP_ERP_FAILED;
 }
 
-static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_lun(
+	struct scsi_device *sdev, enum zfcp_erp_act_result result)
 {
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
@@ -1084,6 +1108,12 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
				ZFCP_STATUS_COMMON_ERP_FAILED);
		}
		break;
+	case ZFCP_ERP_CONTINUES:
+	case ZFCP_ERP_EXIT:
+	case ZFCP_ERP_DISMISSED:
+	case ZFCP_ERP_NOMEM:
+		/* NOP */
+		break;
	}
 
	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1093,7 +1123,8 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
	return result;
 }
 
-static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_port(
+	struct zfcp_port *port, enum zfcp_erp_act_result result)
 {
	switch (result) {
	case ZFCP_ERP_SUCCEEDED :
@@ -1115,6 +1146,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
				ZFCP_STATUS_COMMON_ERP_FAILED);
		}
		break;
+	case ZFCP_ERP_CONTINUES:
+	case ZFCP_ERP_EXIT:
+	case ZFCP_ERP_DISMISSED:
+	case ZFCP_ERP_NOMEM:
+		/* NOP */
+		break;
	}
 
	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1124,8 +1161,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
	return result;
 }
 
-static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
-					   int result)
+static enum zfcp_erp_act_result zfcp_erp_strategy_check_adapter(
+	struct zfcp_adapter *adapter, enum zfcp_erp_act_result result)
 {
	switch (result) {
	case ZFCP_ERP_SUCCEEDED :
@ -1143,6 +1180,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
|
|||
ZFCP_STATUS_COMMON_ERP_FAILED);
|
||||
}
|
||||
break;
|
||||
case ZFCP_ERP_CONTINUES:
|
||||
case ZFCP_ERP_EXIT:
|
||||
case ZFCP_ERP_DISMISSED:
|
||||
case ZFCP_ERP_NOMEM:
|
||||
/* NOP */
|
||||
break;
|
||||
}
|
||||
|
||||
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
|
||||
|
@ -1152,14 +1195,14 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
|
|||
return result;
|
||||
}
|
||||
|
||||
static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
|
||||
int result)
|
||||
static enum zfcp_erp_act_result zfcp_erp_strategy_check_target(
|
||||
struct zfcp_erp_action *erp_action, enum zfcp_erp_act_result result)
|
||||
{
|
||||
struct zfcp_adapter *adapter = erp_action->adapter;
|
||||
struct zfcp_port *port = erp_action->port;
|
||||
struct scsi_device *sdev = erp_action->sdev;
|
||||
|
||||
switch (erp_action->action) {
|
||||
switch (erp_action->type) {
|
||||
|
||||
case ZFCP_ERP_ACTION_REOPEN_LUN:
|
||||
result = zfcp_erp_strategy_check_lun(sdev, result);
|
||||
|
@ -1192,16 +1235,17 @@ static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
|
||||
static enum zfcp_erp_act_result zfcp_erp_strategy_statechange(
|
||||
struct zfcp_erp_action *act, enum zfcp_erp_act_result result)
|
||||
{
|
||||
int action = act->action;
|
||||
enum zfcp_erp_act_type type = act->type;
|
||||
struct zfcp_adapter *adapter = act->adapter;
|
||||
struct zfcp_port *port = act->port;
|
||||
struct scsi_device *sdev = act->sdev;
|
||||
struct zfcp_scsi_dev *zfcp_sdev;
|
||||
u32 erp_status = act->status;
|
||||
|
||||
switch (action) {
|
||||
switch (type) {
|
||||
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
|
||||
if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
|
||||
_zfcp_erp_adapter_reopen(adapter,
|
||||
|
@ -1231,7 +1275,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
|
|||
}
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
return result;
|
||||
}
|
||||
|
||||
static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
|
||||
|
@ -1248,7 +1292,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
|
|||
list_del(&erp_action->list);
|
||||
zfcp_dbf_rec_run("eractd1", erp_action);
|
||||
|
||||
switch (erp_action->action) {
|
||||
switch (erp_action->type) {
|
||||
case ZFCP_ERP_ACTION_REOPEN_LUN:
|
||||
zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
|
||||
atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
|
||||
|
@ -1324,13 +1368,14 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
|
|||
write_unlock_irqrestore(&adapter->erp_lock, flags);
|
||||
}
|
||||
|
||||
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
|
||||
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
|
||||
enum zfcp_erp_act_result result)
|
||||
{
|
||||
struct zfcp_adapter *adapter = act->adapter;
|
||||
struct zfcp_port *port = act->port;
|
||||
struct scsi_device *sdev = act->sdev;
|
||||
|
||||
switch (act->action) {
|
||||
switch (act->type) {
|
||||
case ZFCP_ERP_ACTION_REOPEN_LUN:
|
||||
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
|
||||
scsi_device_put(sdev);
|
||||
|
@ -1364,9 +1409,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
|
|||
}
|
||||
}
|
||||
|
||||
static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
|
||||
static enum zfcp_erp_act_result zfcp_erp_strategy_do_action(
|
||||
struct zfcp_erp_action *erp_action)
|
||||
{
|
||||
switch (erp_action->action) {
|
||||
switch (erp_action->type) {
|
||||
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
|
||||
return zfcp_erp_adapter_strategy(erp_action);
|
||||
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
|
||||
|
@ -1379,9 +1425,10 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
|
|||
return ZFCP_ERP_FAILED;
|
||||
}
|
||||
|
||||
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
|
||||
static enum zfcp_erp_act_result zfcp_erp_strategy(
|
||||
struct zfcp_erp_action *erp_action)
|
||||
{
|
||||
int retval;
|
||||
enum zfcp_erp_act_result result;
|
||||
unsigned long flags;
|
||||
struct zfcp_adapter *adapter = erp_action->adapter;
|
||||
|
||||
|
@ -1392,12 +1439,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
|
|||
|
||||
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
|
||||
zfcp_erp_action_dequeue(erp_action);
|
||||
retval = ZFCP_ERP_DISMISSED;
|
||||
result = ZFCP_ERP_DISMISSED;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
|
||||
retval = ZFCP_ERP_FAILED;
|
||||
result = ZFCP_ERP_FAILED;
|
||||
goto check_target;
|
||||
}
|
||||
|
||||
|
@ -1405,13 +1452,13 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
|
|||
|
||||
/* no lock to allow for blocking operations */
|
||||
write_unlock_irqrestore(&adapter->erp_lock, flags);
|
||||
retval = zfcp_erp_strategy_do_action(erp_action);
|
||||
result = zfcp_erp_strategy_do_action(erp_action);
|
||||
write_lock_irqsave(&adapter->erp_lock, flags);
|
||||
|
||||
if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
|
||||
retval = ZFCP_ERP_CONTINUES;
|
||||
result = ZFCP_ERP_CONTINUES;
|
||||
|
||||
switch (retval) {
|
||||
switch (result) {
|
||||
case ZFCP_ERP_NOMEM:
|
||||
if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
|
||||
++adapter->erp_low_mem_count;
|
||||
|
@ -1421,7 +1468,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
|
|||
_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
|
||||
else {
|
||||
zfcp_erp_strategy_memwait(erp_action);
|
||||
retval = ZFCP_ERP_CONTINUES;
|
||||
result = ZFCP_ERP_CONTINUES;
|
||||
}
|
||||
goto unlock;
|
||||
|
||||
|
@ -1431,27 +1478,33 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
|
|||
erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
|
||||
}
|
||||
goto unlock;
|
||||
case ZFCP_ERP_SUCCEEDED:
|
||||
case ZFCP_ERP_FAILED:
|
||||
case ZFCP_ERP_EXIT:
|
||||
case ZFCP_ERP_DISMISSED:
|
||||
/* NOP */
|
||||
break;
|
||||
}
|
||||
|
||||
check_target:
|
||||
retval = zfcp_erp_strategy_check_target(erp_action, retval);
|
||||
result = zfcp_erp_strategy_check_target(erp_action, result);
|
||||
zfcp_erp_action_dequeue(erp_action);
|
||||
retval = zfcp_erp_strategy_statechange(erp_action, retval);
|
||||
if (retval == ZFCP_ERP_EXIT)
|
||||
result = zfcp_erp_strategy_statechange(erp_action, result);
|
||||
if (result == ZFCP_ERP_EXIT)
|
||||
goto unlock;
|
||||
if (retval == ZFCP_ERP_SUCCEEDED)
|
||||
if (result == ZFCP_ERP_SUCCEEDED)
|
||||
zfcp_erp_strategy_followup_success(erp_action);
|
||||
if (retval == ZFCP_ERP_FAILED)
|
||||
if (result == ZFCP_ERP_FAILED)
|
||||
zfcp_erp_strategy_followup_failed(erp_action);
|
||||
|
||||
unlock:
|
||||
write_unlock_irqrestore(&adapter->erp_lock, flags);
|
||||
|
||||
if (retval != ZFCP_ERP_CONTINUES)
|
||||
zfcp_erp_action_cleanup(erp_action, retval);
|
||||
if (result != ZFCP_ERP_CONTINUES)
|
||||
zfcp_erp_action_cleanup(erp_action, result);
|
||||
|
||||
kref_put(&adapter->ref, zfcp_adapter_release);
|
||||
return retval;
|
||||
return result;
|
||||
}
|
||||
|
||||
static int zfcp_erp_thread(void *data)
|
||||
|
@ -1489,7 +1542,7 @@ static int zfcp_erp_thread(void *data)
|
|||
* zfcp_erp_thread_setup - Start ERP thread for adapter
|
||||
* @adapter: Adapter to start the ERP thread for
|
||||
*
|
||||
* Returns 0 on success or error code from kernel_thread()
|
||||
* Return: 0 on success, or error code from kthread_run().
|
||||
*/
|
||||
int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
|
||||
{
|
||||
|
@ -1694,11 +1747,11 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
|
|||
/**
|
||||
* zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
|
||||
* @adapter: Pointer to zfcp_adapter to reopen.
|
||||
* @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
|
||||
* @dbftag: Trace tag string of length %ZFCP_DBF_TAG_LEN.
|
||||
*/
|
||||
void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
|
||||
void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *dbftag)
|
||||
{
|
||||
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
|
||||
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
|
||||
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
|
||||
zfcp_erp_wait(adapter);
|
||||
}
|
||||
|
|
|
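The zfcp_erp.c hunks above are a pure type-strengthening refactor: functions that returned ERP results as plain int now return enum zfcp_erp_act_result, and every switch gains explicit case arms for the values it deliberately ignores. The payoff is that the compiler's -Wswitch warning can then flag any switch that misses a newly added result. A minimal standalone sketch of that effect; the enum members here are abbreviated stand-ins, not the actual list in zfcp's headers:

#include <stdio.h>

/* Illustrative only; the real enum lives in the zfcp driver headers. */
enum erp_result { ERP_CONTINUES, ERP_SUCCEEDED, ERP_FAILED };

/* Because the parameter is a typed enum (not int), compiling with
 * -Wswitch warns if a member is missing a case below; with a plain
 * int parameter the compiler cannot know the value set at all. */
static const char *erp_result_name(enum erp_result res)
{
	switch (res) {
	case ERP_CONTINUES:
		return "continues";
	case ERP_SUCCEEDED:
		return "succeeded";
	case ERP_FAILED:
		return "failed";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", erp_result_name(ERP_SUCCEEDED));
	return 0;
}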
@@ -59,14 +59,15 @@ extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
 /* zfcp_erp.c */
 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
-extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+extern void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
 					     struct zfcp_adapter *adapter,
 					     u64 port_name, u32 port_id);
 extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
 extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
+extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
+				 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
@@ -79,7 +80,8 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
 extern void zfcp_erp_wait(struct zfcp_adapter *);
 extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
 extern void zfcp_erp_timeout_handler(struct timer_list *t);
-extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
+extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter,
+					char *dbftag);

 /* zfcp_fc.c */
 extern struct kmem_cache *zfcp_fc_req_cache;
@@ -144,6 +146,7 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);

 /* zfcp_scsi.c */
+extern bool zfcp_experimental_dix;
 extern struct scsi_transport_template *zfcp_scsi_transport_template;
 extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
 extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
@@ -312,7 +312,7 @@ static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)

 /**
  * zfcp_fc_incoming_els - handle incoming ELS
- * @fsf_req - request which contains incoming ELS
+ * @fsf_req: request which contains incoming ELS
 */
 void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
 {
@@ -597,6 +597,48 @@ void zfcp_fc_test_link(struct zfcp_port *port)
 	put_device(&port->dev);
 }

+/**
+ * zfcp_fc_sg_free_table - free memory used by scatterlists
+ * @sg: pointer to scatterlist
+ * @count: number of scatterlist which are to be free'ed
+ * the scatterlist are expected to reference pages always
+ */
+static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++, sg++)
+		if (sg)
+			free_page((unsigned long) sg_virt(sg));
+		else
+			break;
+}
+
+/**
+ * zfcp_fc_sg_setup_table - init scatterlist and allocate, assign buffers
+ * @sg: pointer to struct scatterlist
+ * @count: number of scatterlists which should be assigned with buffers
+ * of size page
+ *
+ * Returns: 0 on success, -ENOMEM otherwise
+ */
+static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
+{
+	void *addr;
+	int i;
+
+	sg_init_table(sg, count);
+	for (i = 0; i < count; i++, sg++) {
+		addr = (void *) get_zeroed_page(GFP_KERNEL);
+		if (!addr) {
+			zfcp_fc_sg_free_table(sg, i);
+			return -ENOMEM;
+		}
+		sg_set_buf(sg, addr, PAGE_SIZE);
+	}
+	return 0;
+}
+
 static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
 {
 	struct zfcp_fc_req *fc_req;
@@ -605,7 +647,7 @@ static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
 	if (!fc_req)
 		return NULL;

-	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+	if (zfcp_fc_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
 		kmem_cache_free(zfcp_fc_req_cache, fc_req);
 		return NULL;
 	}
@@ -763,7 +805,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
 			break;
 		}
 	}
-	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+	zfcp_fc_sg_free_table(&fc_req->sg_rsp, buf_num);
 	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 out:
 	zfcp_fc_wka_port_put(&adapter->gs->ds);
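zfcp_fc_sg_setup_table() above allocates one zeroed page per scatterlist entry and, on the first failure, frees exactly the i buffers already allocated before returning -ENOMEM. That alloc-then-unwind idiom is general; here is a runnable userspace analogue of the same shape (plain heap buffers standing in for pages and scatterlists):

#include <stdlib.h>

/* Free the first 'count' buffers of the table. */
static void free_table(void **bufs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		free(bufs[i]);
}

/* Fill 'count' slots with zeroed buffers; on failure, unwind only
 * what was already allocated, mirroring zfcp_fc_sg_setup_table(). */
static int setup_table(void **bufs, int count, size_t bufsz)
{
	int i;

	for (i = 0; i < count; i++) {
		bufs[i] = calloc(1, bufsz);  /* zeroed, like get_zeroed_page() */
		if (!bufs[i]) {
			free_table(bufs, i);
			return -1;
		}
	}
	return 0;
}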
@@ -121,9 +121,24 @@ struct zfcp_fc_rspn_req {
 /**
  * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
  * @ct_els: data required for issuing fsf command
- * @sg_req: scatterlist entry for request data
- * @sg_rsp: scatterlist entry for response data
- * @u: request specific data
+ * @sg_req: scatterlist entry for request data, refers to embedded @u submember
+ * @sg_rsp: scatterlist entry for response data, refers to embedded @u submember
+ * @u: request and response specific data
+ * @u.adisc: ADISC specific data
+ * @u.adisc.req: ADISC request
+ * @u.adisc.rsp: ADISC response
+ * @u.gid_pn: GID_PN specific data
+ * @u.gid_pn.req: GID_PN request
+ * @u.gid_pn.rsp: GID_PN response
+ * @u.gpn_ft: GPN_FT specific data
+ * @u.gpn_ft.sg_rsp2: GPN_FT response, not embedded here, allocated elsewhere
+ * @u.gpn_ft.req: GPN_FT request
+ * @u.gspn: GSPN specific data
+ * @u.gspn.req: GSPN request
+ * @u.gspn.rsp: GSPN response
+ * @u.rspn: RSPN specific data
+ * @u.rspn.req: RSPN request
+ * @u.rspn.rsp: RSPN response
 */
 struct zfcp_fc_req {
 	struct zfcp_fsf_ct_els ct_els;
@@ -19,6 +19,11 @@
 #include "zfcp_qdio.h"
 #include "zfcp_reqlist.h"

+/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
+#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
+/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
+#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
+
 struct kmem_cache *zfcp_fsf_qtcb_cache;

 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
@@ -74,18 +79,18 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)

 /**
  * zfcp_fsf_req_free - free memory used by fsf request
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @req: pointer to struct zfcp_fsf_req
 */
 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
 {
 	if (likely(req->pool)) {
-		if (likely(req->qtcb))
+		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
 		mempool_free(req, req->pool);
 		return;
 	}

-	if (likely(req->qtcb))
+	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
 	kfree(req);
 }
@@ -379,7 +384,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)

 /**
  * zfcp_fsf_req_complete - process completion of a FSF request
- * @fsf_req: The FSF request that has been completed.
+ * @req: The FSF request that has been completed.
  *
  * When a request has been completed either from the FCP adapter,
  * or it has been dismissed due to a queue shutdown, this function
@@ -388,7 +393,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 */
 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 {
-	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
+	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
 		zfcp_fsf_status_read_handler(req);
 		return;
 	}
@@ -705,7 +710,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 	init_completion(&req->completion);

 	req->adapter = adapter;
-	req->fsf_command = fsf_cmd;
 	req->req_id = adapter->req_no;

 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
@@ -720,14 +724,13 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 			return ERR_PTR(-ENOMEM);
 		}

 		req->seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_id = req->req_id;
 		req->qtcb->prefix.ulp_info = 26;
-		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
+		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
 		req->qtcb->header.req_handle = req->req_id;
-		req->qtcb->header.fsf_command = req->fsf_command;
+		req->qtcb->header.fsf_command = fsf_cmd;
 	}

 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
@@ -740,7 +743,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct zfcp_qdio *qdio = adapter->qdio;
-	int with_qtcb = (req->qtcb != NULL);
 	int req_id = req->req_id;

 	zfcp_reqlist_add(adapter->req_list, req);
@@ -756,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 	}

 	/* Don't increase for unsolicited status */
-	if (with_qtcb)
+	if (!zfcp_fsf_req_is_status_read_buffer(req))
 		adapter->fsf_req_seq_no++;
 	adapter->req_no++;

@@ -765,8 +767,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)

 /**
  * zfcp_fsf_status_read - send status read request
- * @adapter: pointer to struct zfcp_adapter
- * @req_flags: request flags
+ * @qdio: pointer to struct zfcp_qdio
  * Returns: 0 on success, ERROR otherwise
 */
 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
@@ -912,7 +913,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;

-	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
 	if (!zfcp_fsf_req_send(req))
 		goto out;

@@ -1057,8 +1058,10 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,

 /**
  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
+ * @wka_port: pointer to zfcp WKA port to send CT/GS to
  * @ct: pointer to struct zfcp_send_ct with data for request
  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
+ * @timeout: timeout that hardware should use, and a later software timeout
 */
 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
@@ -1151,7 +1154,10 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)

 /**
  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
+ * @adapter: pointer to zfcp adapter
+ * @d_id: N_Port_ID to send ELS to
  * @els: pointer to struct zfcp_send_els with data for the command
+ * @timeout: timeout that hardware should use, and a later software timeout
 */
 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
@@ -1809,7 +1815,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 	case FSF_LUN_SHARING_VIOLATION:
 		if (qual->word[0])
 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
-				 "LUN 0x%Lx on port 0x%Lx is already in "
+				 "LUN 0x%016Lx on port 0x%016Lx is already in "
 				 "use by CSS%d, MIF Image ID %x\n",
 				 zfcp_scsi_dev_lun(sdev),
 				 (unsigned long long)zfcp_sdev->port->wwpn,
@@ -1986,7 +1992,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
 	return retval;
 }

-static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
+static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
 {
 	lat_rec->sum += lat;
 	lat_rec->min = min(lat_rec->min, lat);
@@ -1996,7 +2002,7 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 {
 	struct fsf_qual_latency_info *lat_in;
-	struct latency_cont *lat = NULL;
+	struct zfcp_latency_cont *lat = NULL;
 	struct zfcp_scsi_dev *zfcp_sdev;
 	struct zfcp_blk_drv_data blktrc;
 	int ticks = req->adapter->timer_ticks;
@@ -2088,11 +2094,8 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
 		break;
 	case FSF_CMND_LENGTH_NOT_VALID:
 		dev_err(&req->adapter->ccw_device->dev,
-			"Incorrect CDB length %d, LUN 0x%016Lx on "
-			"port 0x%016Lx closed\n",
-			req->qtcb->bottom.io.fcp_cmnd_length,
-			(unsigned long long)zfcp_scsi_dev_lun(sdev),
-			(unsigned long long)zfcp_sdev->port->wwpn);
+			"Incorrect FCP_CMND length %d, FCP device closed\n",
+			req->qtcb->bottom.io.fcp_cmnd_length);
 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
@@ -2369,7 +2372,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
 	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);

-	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
+	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
 	if (!zfcp_fsf_req_send(req))
 		goto out;

@@ -2382,7 +2385,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,

 /**
  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
  * @sbal_idx: response queue index of SBAL to be processed
 */
 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
|
|||
|
||||
/**
|
||||
* struct zfcp_fsf_ct_els - zfcp data for ct or els request
|
||||
* @req: scatter-gather list for request
|
||||
* @resp: scatter-gather list for response
|
||||
* @req: scatter-gather list for request, points to &zfcp_fc_req.sg_req or BSG
|
||||
* @resp: scatter-gather list for response, points to &zfcp_fc_req.sg_rsp or BSG
|
||||
* @handler: handler function (called for response to the request)
|
||||
* @handler_data: data passed to handler function
|
||||
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
|
||||
|
|
|
@@ -4,7 +4,7 @@
 *
 * Setup and helper functions to access QDIO.
 *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
 */

 #define KMSG_COMPONENT "zfcp"
@@ -19,7 +19,7 @@ static bool enable_multibuffer = true;
 module_param_named(datarouter, enable_multibuffer, bool, 0400);
 MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

-static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
 				    unsigned int qdio_err)
 {
 	struct zfcp_adapter *adapter = qdio->adapter;
@@ -28,12 +28,12 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,

 	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
 		zfcp_qdio_siosl(adapter);
-		zfcp_erp_adapter_shutdown(adapter, 0, id);
+		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
 		return;
 	}
 	zfcp_erp_adapter_reopen(adapter,
 				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-				ZFCP_STATUS_COMMON_ERP_FAILED, id);
+				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
 }

 static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -180,7 +180,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
- * @max_sbals: upper bound for number of SBALs to be used
 * Returns: zero or -EINVAL on error
 */
 int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
@@ -303,7 +302,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,

 /**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *	    qdio_allocate
 */
@@ -30,6 +30,8 @@
 * @req_q_full: queue full incidents
 * @req_q_wq: used to wait for SBAL availability
 * @adapter: adapter used in conjunction with this qdio structure
+ * @max_sbale_per_sbal: qdio limit per sbal
+ * @max_sbale_per_req: qdio limit per request
 */
 struct zfcp_qdio {
 	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
@@ -70,7 +72,7 @@ struct zfcp_qdio_req {
 /**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
- * @q_rec: pointer to struct zfcp_qdio_req
+ * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
 static inline struct qdio_buffer_element *
@@ -82,7 +84,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 /**
 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
 static inline struct qdio_buffer_element *
@@ -135,6 +137,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_queue_req
+ * @data: pointer to data
+ * @len: length of data
 *
 * This is only required for single sbal requests, calling it when
 * wrapping around to the next sbal is a bug.
@@ -182,6 +186,7 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)

 /**
 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
 static inline
@@ -17,7 +17,7 @@
 /**
 * struct zfcp_reqlist - Container for request list (reqlist)
 * @lock: Spinlock for protecting the hash list
- * @list: Array of hashbuckets, each is a list of requests in this bucket
+ * @buckets: Array of hashbuckets, each is a list of requests in this bucket
 */
 struct zfcp_reqlist {
 	spinlock_t lock;
@@ -27,7 +27,11 @@ MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");

 static bool enable_dif;
 module_param_named(dif, enable_dif, bool, 0400);
-MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+MODULE_PARM_DESC(dif, "Enable DIF data integrity support (default off)");
+
+bool zfcp_experimental_dix;
+module_param_named(dix, zfcp_experimental_dix, bool, 0400);
+MODULE_PARM_DESC(dix, "Enable experimental DIX (data integrity extension) support which implies DIF support (default off)");

 static bool allow_lun_scan = true;
 module_param(allow_lun_scan, bool, 0600);
@@ -226,7 +230,9 @@ static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
 		(struct zfcp_scsi_req_filter *)data;

 	/* already aborted - prevent side-effects - or not a SCSI command */
-	if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+	if (old_req->data == NULL ||
+	    zfcp_fsf_req_is_status_read_buffer(old_req) ||
+	    old_req->qtcb->header.fsf_command != FSF_QTCB_FCP_CMND)
 		return;

 	/* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
@@ -423,7 +429,6 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 				   * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
 				   /* GCD, adjusted later */
 	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
-	.use_clustering		 = 1,
 	.shost_attrs		 = zfcp_sysfs_shost_attrs,
 	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
 	.track_queue_depth	 = 1,
@@ -788,11 +793,11 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
 	data_div = atomic_read(&adapter->status) &
 		   ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;

-	if (enable_dif &&
+	if ((enable_dif || zfcp_experimental_dix) &&
 	    adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
 		mask |= SHOST_DIF_TYPE1_PROTECTION;

-	if (enable_dif && data_div &&
+	if (zfcp_experimental_dix && data_div &&
 	    adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
 		mask |= SHOST_DIX_TYPE1_PROTECTION;
 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
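The zfcp_scsi.c hunk above splits one "dif" module parameter into separate "dif" and "dix" switches and changes the gating so that requesting experimental DIX implies DIF. The resulting truth table is easy to restate as a runnable sketch; the flag and mask values below are made up for illustration and are not the SHOST_* or FSF_* constants:

#include <stdbool.h>
#include <stdio.h>

#define FEAT_DIF 0x1   /* illustrative stand-ins */
#define FEAT_DIX 0x2
#define PROT_DIF 0x10
#define PROT_DIX 0x20

/* Mirrors the gating above: DIF protection is enabled when either
 * switch is set (DIX implies DIF); DIX additionally requires its
 * own switch plus the corresponding hardware feature. */
static unsigned int prot_mask(bool dif, bool dix, unsigned int features)
{
	unsigned int mask = 0;

	if ((dif || dix) && (features & FEAT_DIF))
		mask |= PROT_DIF;
	if (dix && (features & FEAT_DIX))
		mask |= PROT_DIX;
	return mask;
}

int main(void)
{
	/* dix alone still yields DIF + DIX when the hardware supports both */
	printf("0x%x\n", prot_mask(false, true, FEAT_DIF | FEAT_DIX));
	return 0;
}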
@@ -1998,7 +1998,6 @@ static struct scsi_host_template driver_template = {
 	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twa_host_attrs,
 	.emulated		= 1,
 	.no_write_same		= 1,
@@ -1550,7 +1550,6 @@ static struct scsi_host_template driver_template = {
 	.sg_tablesize		= TW_LIBERATOR_MAX_SGL_LENGTH,
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twl_host_attrs,
 	.emulated		= 1,
 	.no_write_same		= 1,
@@ -1174,7 +1174,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
 	command_que_value = tw_dev->command_packet_physical_address[request_id];
 	if (command_que_value == 0) {
 		printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n");
-	return 1;
+		return 1;
 	}

 	/* Send command packet to the board */
@@ -2247,7 +2247,6 @@ static struct scsi_host_template driver_template = {
 	.sg_tablesize		= TW_MAX_SGL_LENGTH,
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= tw_host_attrs,
 	.emulated		= 1,
 	.no_write_same		= 1,
@@ -318,7 +318,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
 	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
-	tpnt->use_clustering = ENABLE_CLUSTERING;
 	tpnt->slave_configure = NCR_700_slave_configure;
 	tpnt->slave_destroy = NCR_700_slave_destroy;
 	tpnt->slave_alloc = NCR_700_slave_alloc;
@@ -2641,6 +2641,7 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
 	case BLOGIC_BAD_CMD_PARAM:
 		blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
 				adapter, adapter_status);
+		/* fall through */
 	case BLOGIC_DATA_UNDERRUN:
 	case BLOGIC_DATA_OVERRUN:
 	case BLOGIC_NOEXPECT_BUSFREE:
@@ -3857,7 +3858,6 @@ static struct scsi_host_template blogic_template = {
 #endif
 	.unchecked_isa_dma = 1,
 	.max_sectors = 128,
-	.use_clustering = ENABLE_CLUSTERING,
 };

 /*
@@ -1078,7 +1078,6 @@ static struct scsi_host_template inia100_template = {
 	.can_queue		= 1,
 	.this_id		= 1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 };

 static int inia100_probe_one(struct pci_dev *pdev,
@@ -160,7 +160,7 @@ static struct scsi_host_template a2091_scsi_template = {
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= CMD_PER_LUN,
-	.use_clustering		= DISABLE_CLUSTERING
+	.dma_boundary		= PAGE_SIZE - 1,
 };

 static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
@@ -175,7 +175,6 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING
 };

 static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
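Every scsi_host_template hunk in this stretch drops .use_clustering. As the merge description says, the DISABLE_CLUSTERING flag was removed in this cycle's DMA rework: merging of adjacent scatter-gather segments became the unconditional default, so ENABLE_CLUSTERING lines simply disappear, while drivers that relied on DISABLE_CLUSTERING now state the underlying constraint explicitly with .dma_boundary, as the a2091 hunk above shows. A minimal sketch of the resulting template; "example_template" and its values are placeholders, not a real driver:

#include <scsi/scsi_host.h>

/* Hedged sketch of the "after" state of these conversions. */
static struct scsi_host_template example_template = {
	.name		= "example",
	.this_id	= 7,
	.sg_tablesize	= SG_ALL,
	/* was: .use_clustering = DISABLE_CLUSTERING,
	 * i.e. "never let a segment span a page boundary": */
	.dma_boundary	= PAGE_SIZE - 1,
};

Expressing the limit as a DMA boundary lets the block layer enforce it directly instead of overloading a clustering flag.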
@@ -2892,6 +2892,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		    !(dev->raw_io_64) ||
 		    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
 			break;
+		/* fall through */
 	case INQUIRY:
 	case READ_CAPACITY:
 	case TEST_UNIT_READY:
@@ -2966,6 +2967,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		/* Issue FIB to tell Firmware to flush it's cache */
 		if ((aac_cache & 6) != 2)
 			return aac_synchronize(scsicmd);
+		/* fall through */
 	case INQUIRY:
 	{
 		struct inquiry_data inq_data;
@@ -3319,8 +3321,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 			       min_t(size_t,
 				     sizeof(dev->fsa_dev[cid].sense_data),
 				     SCSI_SENSE_BUFFERSIZE));
-		break;
+			break;
 		}
+		/* fall through */
 	case RESERVE:
 	case RELEASE:
 	case REZERO_UNIT:
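The aacraid hunks above, like the BusLogic one before them, only add "fall through" comments: GCC's -Wimplicit-fallthrough warning treats such a comment (or, in later kernels, the fallthrough pseudo-keyword) as an explicit statement that the missing break is intentional. A runnable illustration of the idiom, unrelated to any driver:

#include <stdio.h>

/* Cumulative flags by level: each higher case deliberately falls
 * through to also pick up the lower levels' flags. With
 * -Wimplicit-fallthrough the comments below silence the warning. */
static int flags_for(int level)
{
	int flags = 0;

	switch (level) {
	case 2:
		flags |= 4;
		/* fall through */
	case 1:
		flags |= 2;
		/* fall through */
	case 0:
		flags |= 1;
		break;
	default:
		break;
	}
	return flags;
}

int main(void)
{
	printf("%d\n", flags_for(2)); /* prints 7 */
	return 0;
}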
@@ -40,6 +40,7 @@
 #define nblank(x) _nblank(x)[0]

 #include <linux/interrupt.h>
+#include <linux/completion.h>
 #include <linux/pci.h>
 #include <scsi/scsi_host.h>

@@ -1241,7 +1242,7 @@ struct aac_fib_context {
 	u32			unique;		// unique value representing this context
 	ulong			jiffies;	// used for cleanup - dmb changed to ulong
 	struct list_head	next;		// used to link context's into a linked list
-	struct semaphore	wait_sem;	// this is used to wait for the next fib to arrive.
+	struct completion	completion;	// this is used to wait for the next fib to arrive.
 	int			wait;		// Set to true when thread is in WaitForSingleObject
 	unsigned long		count;		// total number of FIBs on FibList
 	struct list_head	fib_list;	// this holds fibs and their attachd hw_fibs
@@ -1313,7 +1314,7 @@ struct fib {
 	 * This is the event the sendfib routine will wait on if the
 	 * caller did not pass one and this is synch io.
 	 */
-	struct semaphore	event_wait;
+	struct completion	event_wait;
 	spinlock_t		event_lock;

 	u32			done;	/* gets set to 1 when fib is complete */
@@ -41,7 +41,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h> /* ssleep prototype */
 #include <linux/kthread.h>
-#include <linux/semaphore.h>
 #include <linux/uaccess.h>
 #include <scsi/scsi_host.h>

@@ -203,7 +202,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
 		/*
 		 * Initialize the mutex used to wait for the next AIF.
 		 */
-		sema_init(&fibctx->wait_sem, 0);
+		init_completion(&fibctx->completion);
 		fibctx->wait = 0;
 		/*
 		 * Initialize the fibs and set the count of fibs on
@@ -335,7 +334,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
 			ssleep(1);
 		}
 		if (f.wait) {
-			if(down_interruptible(&fibctx->wait_sem) < 0) {
+			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
 				status = -ERESTARTSYS;
 			} else {
 				/* Lock again and retry */
@@ -44,7 +44,6 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
-#include <linux/semaphore.h>
 #include <linux/bcd.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -189,7 +188,7 @@ int aac_fib_setup(struct aac_dev * dev)
 		fibptr->hw_fib_va = hw_fib;
 		fibptr->data = (void *) fibptr->hw_fib_va->data;
 		fibptr->next = fibptr+1;	/* Forward chain the fibs */
-		sema_init(&fibptr->event_wait, 0);
+		init_completion(&fibptr->event_wait);
 		spin_lock_init(&fibptr->event_lock);
 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
 		hw_fib->header.SenderSize =
@@ -623,7 +622,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 		}
 		if (wait) {
 			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
-			if (down_interruptible(&fibptr->event_wait)) {
+			if (wait_for_completion_interruptible(&fibptr->event_wait)) {
 				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
 				return -EFAULT;
 			}
@@ -659,7 +658,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 			 * hardware failure has occurred.
 			 */
 			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
-			while (down_trylock(&fibptr->event_wait)) {
+			while (!try_wait_for_completion(&fibptr->event_wait)) {
 				int blink;
 				if (time_is_before_eq_jiffies(timeout)) {
 					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
@@ -689,9 +688,9 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 				 */
 				schedule();
 			}
-		} else if (down_interruptible(&fibptr->event_wait)) {
+		} else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
 			/* Do nothing ... satisfy
-			 * down_interruptible must_check */
+			 * wait_for_completion_interruptible must_check */
 		}

 		spin_lock_irqsave(&fibptr->event_lock, flags);
@@ -777,7 +776,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 			return -EFAULT;

 		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
-		if (down_interruptible(&fibptr->event_wait))
+		if (wait_for_completion_interruptible(&fibptr->event_wait))
 			fibptr->done = 2;
 		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

@@ -1538,7 +1537,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 			    || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
 				unsigned long flagv;
 				spin_lock_irqsave(&fib->event_lock, flagv);
-				up(&fib->event_wait);
+				complete(&fib->event_wait);
 				spin_unlock_irqrestore(&fib->event_lock, flagv);
 				schedule();
 				retval = 0;
@@ -1828,7 +1827,7 @@ int aac_check_health(struct aac_dev * aac)
 			 * Set the event to wake up the
 			 * thread that will waiting.
 			 */
-			up(&fibctx->wait_sem);
+			complete(&fibctx->completion);
 		} else {
 			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
 			kfree(fib);
@@ -2165,7 +2164,7 @@ static void wakeup_fibctx_threads(struct aac_dev *dev,
 		 * Set the event to wake up the
 		 * thread that is waiting.
 		 */
-		up(&fibctx->wait_sem);
+		complete(&fibctx->completion);

 		entry = entry->next;
 	}
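The aacraid hunks above convert semaphores that were only ever used as one-shot wait/wake pairs into completions, which is the kernel's purpose-built primitive for that pattern. The mapping is one-to-one: sema_init(&s, 0) becomes init_completion(), down_interruptible() becomes wait_for_completion_interruptible(), down_trylock() becomes try_wait_for_completion() (note the inverted return convention visible in the aac_fib_send hunk), and up() becomes complete(). A hedged kernel-C sketch of the converted shape, with placeholder names ("ctx", "evt") rather than the driver's structures:

#include <linux/completion.h>

struct ctx {
	struct completion evt;
};

static void ctx_init(struct ctx *c)
{
	init_completion(&c->evt);	/* was: sema_init(&sem, 0) */
}

static int ctx_wait(struct ctx *c)
{
	/* was: down_interruptible(&sem); 0 on wake, -ERESTARTSYS on signal */
	return wait_for_completion_interruptible(&c->evt);
}

static void ctx_wake(struct ctx *c)
{
	complete(&c->evt);		/* was: up(&sem) */
}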
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/completion.h>
 #include <linux/blkdev.h>
-#include <linux/semaphore.h>

 #include "aacraid.h"

@@ -129,7 +128,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
 		spin_lock_irqsave(&fib->event_lock, flagv);
 		if (!fib->done) {
 			fib->done = 1;
-			up(&fib->event_wait);
+			complete(&fib->event_wait);
 		}
 		spin_unlock_irqrestore(&fib->event_lock, flagv);

@@ -376,16 +375,16 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
 				start_callback = 1;
 			} else {
 				unsigned long flagv;
-				int complete = 0;
+				int completed = 0;

 				dprintk((KERN_INFO "event_wait up\n"));
 				spin_lock_irqsave(&fib->event_lock, flagv);
 				if (fib->done == 2) {
 					fib->done = 1;
-					complete = 1;
+					completed = 1;
 				} else {
 					fib->done = 1;
-					up(&fib->event_wait);
+					complete(&fib->event_wait);
 				}
 				spin_unlock_irqrestore(&fib->event_lock, flagv);

@@ -395,7 +394,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
 							mflags);

 				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
-				if (complete)
+				if (completed)
 					aac_fib_complete(fib);
 			}
 		} else {
@@ -428,16 +427,16 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
 				start_callback = 1;
 			} else {
 				unsigned long flagv;
-				int complete = 0;
+				int completed = 0;

 				dprintk((KERN_INFO "event_wait up\n"));
 				spin_lock_irqsave(&fib->event_lock, flagv);
 				if (fib->done == 2) {
 					fib->done = 1;
-					complete = 1;
+					completed = 1;
 				} else {
 					fib->done = 1;
-					up(&fib->event_wait);
+					complete(&fib->event_wait);
 				}
 				spin_unlock_irqrestore(&fib->event_lock, flagv);

@@ -447,7 +446,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
 							mflags);

 				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
-				if (complete)
+				if (completed)
 					aac_fib_complete(fib);
 			}
 		}
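Note why the dpcsup.c hunks also rename the local variable "complete" to "completed": the same blocks now call the kernel function complete(), and a local of that name would shadow the function and break the call. A tiny runnable demonstration of the shadowing problem the rename avoids; the names are illustrative:

#include <stdio.h>

static void complete(int *flag)	/* stand-in for the kernel's complete() */
{
	*flag = 1;
}

static void handler(void)
{
	/* Had this local still been named "complete", the function of
	 * the same name would be shadowed inside this scope and the
	 * complete(&done) call below would not compile. */
	int completed = 0;
	int done = 0;

	if (!completed)
		complete(&done);
	printf("done=%d\n", done);
}

int main(void)
{
	handler();
	return 0;
}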
@@ -759,6 +759,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
 		    !(aac->raw_io_64) ||
 		    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
 			break;
+		/* fall through */
 	case INQUIRY:
 	case READ_CAPACITY:
 		/*
@@ -1539,7 +1540,6 @@ static struct scsi_host_template aac_driver_template = {
 #else
 	.cmd_per_lun		= AAC_NUM_IO_FIB,
 #endif
-	.use_clustering		= ENABLE_CLUSTERING,
 	.emulated		= 1,
 	.no_write_same		= 1,
 };
@@ -1559,7 +1559,7 @@ static void __aac_shutdown(struct aac_dev * aac)
 		struct fib *fib = &aac->fibs[i];
 		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
 		    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
-			up(&fib->event_wait);
+			complete(&fib->event_wait);
 	}
 	kthread_stop(aac->thread);
 	aac->thread = NULL;
@@ -106,7 +106,7 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
 			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
 				dev->management_fib_count--;
-				up(&dev->sync_fib->event_wait);
+				complete(&dev->sync_fib->event_wait);
 			}
 			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
 						sflags);
@@ -3192,8 +3192,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
 		   shost->sg_tablesize, shost->cmd_per_lun);

 	seq_printf(m,
-		   " unchecked_isa_dma %d, use_clustering %d\n",
-		   shost->unchecked_isa_dma, shost->use_clustering);
+		   " unchecked_isa_dma %d\n",
+		   shost->unchecked_isa_dma);

 	seq_printf(m,
 		   " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
@@ -10808,14 +10808,6 @@ static struct scsi_host_template advansys_template = {
 	 * for non-ISA adapters.
 	 */
 	.unchecked_isa_dma = true,
-	/*
-	 * All adapters controlled by this driver are capable of large
-	 * scatter-gather lists. According to the mid-level SCSI documentation
-	 * this obviates any performance gain provided by setting
-	 * 'use_clustering'. But empirically while CPU utilization is increased
-	 * by enabling clustering, I/O throughput increases as well.
-	 */
-	.use_clustering = ENABLE_CLUSTERING,
 };

 static int advansys_wide_init_chip(struct Scsi_Host *shost)
@@ -2920,7 +2920,7 @@ static struct scsi_host_template aha152x_driver_template = {
 	.can_queue		= 1,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 	.slave_alloc		= aha152x_adjust_queue,
 };
@@ -58,8 +58,15 @@ struct aha1542_hostdata {
 	int aha1542_last_mbi_used;
 	int aha1542_last_mbo_used;
 	struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES];
-	struct mailbox mb[2 * AHA1542_MAILBOXES];
-	struct ccb ccb[AHA1542_MAILBOXES];
+	struct mailbox *mb;
+	dma_addr_t mb_handle;
+	struct ccb *ccb;
+	dma_addr_t ccb_handle;
 };

+struct aha1542_cmd {
+	struct chain *chain;
+	dma_addr_t chain_handle;
+};
+
 static inline void aha1542_intr_reset(u16 base)
@@ -233,6 +240,21 @@ static int aha1542_test_port(struct Scsi_Host *sh)
 	return 1;
 }

+static void aha1542_free_cmd(struct scsi_cmnd *cmd)
+{
+	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
+	struct device *dev = cmd->device->host->dma_dev;
+	size_t len = scsi_sg_count(cmd) * sizeof(struct chain);
+
+	if (acmd->chain) {
+		dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
+		kfree(acmd->chain);
+	}
+
+	acmd->chain = NULL;
+	scsi_dma_unmap(cmd);
+}
+
 static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
 {
 	struct Scsi_Host *sh = dev_id;
@@ -303,7 +325,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
 		return IRQ_HANDLED;
 	};

-	mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb);
+	mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
 	mbistatus = mb[mbi].status;
 	mb[mbi].status = 0;
 	aha1542->aha1542_last_mbi_used = mbi;
@@ -331,8 +353,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
 			return IRQ_HANDLED;
 		}
 		my_done = tmp_cmd->scsi_done;
-		kfree(tmp_cmd->host_scribble);
-		tmp_cmd->host_scribble = NULL;
+		aha1542_free_cmd(tmp_cmd);
 		/* Fetch the sense data, and tuck it away, in the required slot.  The
 		   Adaptec automatically fetches it, and there is no guarantee that
 		   we will still have it in the cdb when we come back */
@@ -369,6 +390,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)

 static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 {
+	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
 	struct aha1542_hostdata *aha1542 = shost_priv(sh);
 	u8 direction;
 	u8 target = cmd->device->id;
@@ -378,7 +400,6 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	int mbo, sg_count;
 	struct mailbox *mb = aha1542->mb;
 	struct ccb *ccb = aha1542->ccb;
-	struct chain *cptr;

 	if (*cmd->cmnd == REQUEST_SENSE) {
 		/* Don't do the command - we have the sense data already */
@@ -398,15 +419,17 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 		print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
 	}
 #endif
-	if (bufflen) {	/* allocate memory before taking host_lock */
-		sg_count = scsi_sg_count(cmd);
-		cptr = kmalloc_array(sg_count, sizeof(*cptr),
-				     GFP_KERNEL | GFP_DMA);
-		if (!cptr)
-			return SCSI_MLQUEUE_HOST_BUSY;
-	} else {
-		sg_count = 0;
-		cptr = NULL;
+	sg_count = scsi_dma_map(cmd);
+	if (sg_count) {
+		size_t len = sg_count * sizeof(struct chain);
+
+		acmd->chain = kmalloc(len, GFP_DMA);
+		if (!acmd->chain)
+			goto out_unmap;
+		acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
+						    len, DMA_TO_DEVICE);
+		if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
+			goto out_free_chain;
 	}

 	/* Use the outgoing mailboxes in a round-robin fashion, because this
@@ -437,7 +460,8 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
 #endif

-	any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo]));	/* This gets trashed for some reason */
+	/* This gets trashed for some reason */
+	any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));

 	memset(&ccb[mbo], 0, sizeof(struct ccb));

@@ -456,21 +480,18 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 		int i;

 		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
-		cmd->host_scribble = (void *)cptr;
 		scsi_for_each_sg(cmd, sg, sg_count, i) {
-			any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
-								+ sg->offset);
-			any2scsi(cptr[i].datalen, sg->length);
+			any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
+			any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
 		};
 		any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
-		any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr));
+		any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
 #ifdef DEBUG
-		shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
-		print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18);
+		shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
+		print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
 #endif
 	} else {
 		ccb[mbo].op = 0;	/* SCSI Initiator Command */
-		cmd->host_scribble = NULL;
 		any2scsi(ccb[mbo].datalen, 0);
 		any2scsi(ccb[mbo].dataptr, 0);
 	};
@@ -488,24 +509,29 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	spin_unlock_irqrestore(sh->host_lock, flags);

 	return 0;
+out_free_chain:
+	kfree(acmd->chain);
+	acmd->chain = NULL;
+out_unmap:
+	scsi_dma_unmap(cmd);
+	return SCSI_MLQUEUE_HOST_BUSY;
 }

 /* Initialize mailboxes */
 static void setup_mailboxes(struct Scsi_Host *sh)
 {
 	struct aha1542_hostdata *aha1542 = shost_priv(sh);
-	int i;
-	struct mailbox *mb = aha1542->mb;
-	struct ccb *ccb = aha1542->ccb;
-
 	u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
+	int i;

 	for (i = 0; i < AHA1542_MAILBOXES; i++) {
-		mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0;
-		any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i]));
+		aha1542->mb[i].status = 0;
+		any2scsi(aha1542->mb[i].ccbptr,
+			 aha1542->ccb_handle + i * sizeof(struct ccb));
+		aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
 	};
 	aha1542_intr_reset(sh->io_port);	/* reset interrupts, so they don't block */
-	any2scsi((mb_cmd + 2), isa_virt_to_bus(mb));
+	any2scsi(mb_cmd + 2, aha1542->mb_handle);
 	if (aha1542_out(sh->io_port, mb_cmd, 5))
 		shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n");
 	aha1542_intr_reset(sh->io_port);
@@ -739,11 +765,26 @@ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct
 	if (aha1542->bios_translation == BIOS_TRANSLATION_25563)
 		shost_printk(KERN_INFO, sh, "Using extended bios translation\n");

+	if (dma_set_mask_and_coherent(pdev, DMA_BIT_MASK(24)) < 0)
+		goto unregister;
+
+	aha1542->mb = dma_alloc_coherent(pdev,
+			AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+			&aha1542->mb_handle, GFP_KERNEL);
+	if (!aha1542->mb)
+		goto unregister;
+
+	aha1542->ccb = dma_alloc_coherent(pdev,
+			AHA1542_MAILBOXES * sizeof(struct ccb),
+			&aha1542->ccb_handle, GFP_KERNEL);
+	if (!aha1542->ccb)
+		goto free_mb;
+
 	setup_mailboxes(sh);

 	if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) {
 		shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n");
-		goto unregister;
+		goto free_ccb;
 	}
 	if (sh->dma_channel != 0xFF) {
 		if (request_dma(sh->dma_channel, "aha1542")) {
@@ -762,11 +803,18 @@ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct
 	scsi_scan_host(sh);

 	return sh;
+
 free_dma:
 	if (sh->dma_channel != 0xff)
 		free_dma(sh->dma_channel);
 free_irq:
 	free_irq(sh->irq, sh);
+free_ccb:
+	dma_free_coherent(pdev, AHA1542_MAILBOXES * sizeof(struct ccb),
+			  aha1542->ccb, aha1542->ccb_handle);
+free_mb:
+	dma_free_coherent(pdev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+			  aha1542->mb, aha1542->mb_handle);
 unregister:
 	scsi_host_put(sh);
 release:
@@ -777,9 +825,16 @@ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct

 static int aha1542_release(struct Scsi_Host *sh)
 {
+	struct aha1542_hostdata *aha1542 = shost_priv(sh);
+	struct device *dev = sh->dma_dev;
+
 	scsi_remove_host(sh);
 	if (sh->dma_channel != 0xff)
 		free_dma(sh->dma_channel);
+	dma_free_coherent(dev, AHA1542_MAILBOXES * sizeof(struct ccb),
+			  aha1542->ccb, aha1542->ccb_handle);
+	dma_free_coherent(dev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
+			  aha1542->mb, aha1542->mb_handle);
 	if (sh->irq)
 		free_irq(sh->irq, sh);
 	if (sh->io_port && sh->n_io_port)
@@ -826,7 +881,8 @@ static int aha1542_dev_reset(struct scsi_cmnd *cmd)

 	aha1542->aha1542_last_mbo_used = mbo;

-	any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo]));	/* This gets trashed for some reason */
+	/* This gets trashed for some reason */
+	any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));

 	memset(&ccb[mbo], 0, sizeof(struct ccb));

@@ -901,8 +957,7 @@ static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
 			 */
 			continue;
 		}
-		kfree(tmp_cmd->host_scribble);
-		tmp_cmd->host_scribble = NULL;
+		aha1542_free_cmd(tmp_cmd);
 		aha1542->int_cmds[i] = NULL;
 		aha1542->mb[i].status = 0;
 	}
@@ -946,6 +1001,7 @@ static struct scsi_host_template driver_template = {
 	.module			= THIS_MODULE,
 	.proc_name		= "aha1542",
 	.name			= "Adaptec 1542",
+	.cmd_size		= sizeof(struct aha1542_cmd),
 	.queuecommand		= aha1542_queuecommand,
 	.eh_device_reset_handler= aha1542_dev_reset,
 	.eh_bus_reset_handler	= aha1542_bus_reset,
@@ -955,7 +1011,6 @@ static struct scsi_host_template driver_template = {
 	.this_id		= 7,
 	.sg_tablesize		= 16,
 	.unchecked_isa_dma	= 1,
-	.use_clustering		= ENABLE_CLUSTERING,
 };

 static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
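The aha1542 conversion above replaces the legacy isa_virt_to_bus() address arithmetic with the generic DMA API and uses two of its idioms: long-lived tables that the board reads continuously (the mailboxes and CCBs) come from dma_alloc_coherent(), while the per-command scatter-gather chain is a normal kernel buffer mapped for a single transfer with dma_map_single() and validated with dma_mapping_error(). A hedged kernel-C sketch of both idioms; "dev", "alloc_table" and "map_chain" are placeholder names, not the driver's:

#include <linux/dma-mapping.h>

/* Idiom 1: coherent allocation for a table the hardware polls.
 * CPU stores become visible to the device without explicit syncs. */
static void *alloc_table(struct device *dev, size_t size, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

/* Idiom 2: streaming mapping for a buffer handed to the device for
 * one I/O; must be checked before use and unmapped afterwards. */
static int map_chain(struct device *dev, void *buf, size_t len,
		     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

The matching teardown calls, dma_free_coherent() and dma_unmap_single(), appear in the aha1542_release() and aha1542_free_cmd() hunks above.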
@@ -545,7 +545,6 @@ static struct scsi_host_template aha1740_template = {
 	.can_queue		= AHA1740_ECBS,
 	.this_id		= 7,
 	.sg_tablesize		= AHA1740_SCATTER,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_abort_handler	= aha1740_eh_abort_handler,
 };

@@ -920,7 +920,6 @@ struct scsi_host_template aic79xx_driver_template = {
 	.this_id		= -1,
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.slave_alloc		= ahd_linux_slave_alloc,
 	.slave_configure	= ahd_linux_slave_configure,
 	.target_alloc		= ahd_linux_target_alloc,
@@ -807,7 +807,6 @@ struct scsi_host_template aic7xxx_driver_template = {
 	.this_id		= -1,
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.slave_alloc		= ahc_linux_slave_alloc,
 	.slave_configure	= ahc_linux_slave_configure,
 	.target_alloc		= ahc_linux_target_alloc,
@@ -1057,14 +1057,13 @@ static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,

if (ascb) {
ascb->dma_scb.size = sizeof(struct scb);
ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool,
gfp_flags,
&ascb->dma_scb.dma_handle);
if (!ascb->dma_scb.vaddr) {
kmem_cache_free(asd_ascb_cache, ascb);
return NULL;
}
memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
asd_init_ascb(asd_ha, ascb);

spin_lock_irqsave(&seq->tc_index_lock, flags);
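The asd_ascb_alloc() hunk swaps dma_pool_alloc() for dma_pool_zalloc(); because the pool now hands back zeroed memory, the memset() shown above becomes redundant and is dropped. A minimal sketch, assuming an already-created pool:

#include <linux/dmapool.h>

/* Sketch only: "pool" is an existing struct dma_pool. */
static void *example_zalloc(struct dma_pool *pool, dma_addr_t *handle)
{
	/* dma_pool_zalloc() == dma_pool_alloc() followed by memset(0) */
	return dma_pool_zalloc(pool, GFP_ATOMIC, handle);
}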
@@ -68,7 +68,6 @@ static struct scsi_host_template aic94xx_sht = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
@@ -156,7 +156,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
.cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
};

@@ -903,9 +902,9 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(!host){
goto pci_disable_dev;
}
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if(error){
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if(error){
printk(KERN_WARNING
"scsi%d: No suitable DMA mask available\n",

@@ -1049,9 +1048,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
pr_warn("%s: pci_enable_device error\n", __func__);
return -ENODEV;
}
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (error) {
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (error) {
pr_warn("scsi%d: No suitable DMA mask available\n",
host->host_no);
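arcmsr's probe and resume paths above (and bfad further down) replace the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() calls with the generic DMA API. The recurring 64-bit-then-32-bit fallback, sketched against an assumed PCI device:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* dma_set_mask_and_coherent() sets the streaming and coherent masks
 * together; fall back to 32 bits when 64-bit addressing is refused. */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}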
@@ -2890,7 +2890,7 @@ static struct scsi_host_template acornscsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "acornscsi",
};

@@ -245,7 +245,7 @@ static struct scsi_host_template arxescsi_template = {
.can_queue = 0,
.this_id = 7,
.sg_tablesize = SG_ALL,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "arxescsi",
};

@@ -221,10 +221,10 @@ static struct scsi_host_template cumanascsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "CumanaSCSI-1",
.cmd_size = NCR5380_CMD_SIZE,
.max_sectors = 128,
.dma_boundary = PAGE_SIZE - 1,
};

static int cumanascsi1_probe(struct expansion_card *ec,

@@ -367,7 +367,6 @@ static struct scsi_host_template cumanascsi2_template = {
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "cumanascsi2",
};

@@ -486,7 +486,6 @@ static struct scsi_host_template eesox_template = {
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "eesox",
};

@@ -110,7 +110,7 @@ static struct scsi_host_template oakscsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "oakscsi",
.cmd_size = NCR5380_CMD_SIZE,
.max_sectors = 128,

@@ -294,7 +294,6 @@ static struct scsi_host_template powertecscsi_template = {
.sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.cmd_per_lun = 2,
.use_clustering = ENABLE_CLUSTERING,
.proc_name = "powertec",
};

@@ -714,7 +714,7 @@ static struct scsi_host_template atari_scsi_template = {
.eh_host_reset_handler = atari_scsi_host_reset,
.this_id = 7,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
};
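Each `.use_clustering = DISABLE_CLUSTERING` removal in the ARM templates above is paired with a `.dma_boundary = PAGE_SIZE - 1` entry: with the flag gone from the midlayer, the block layer is told directly that no DMA segment may cross a page. A hedged before/after sketch, with the other template fields elided:

/* Before the DMA rework: clustering disabled via a midlayer flag. */
static struct scsi_host_template old_template = {
	.use_clustering	= DISABLE_CLUSTERING,
};

/* After: the same constraint expressed as a DMA boundary. */
static struct scsi_host_template new_template = {
	.dma_boundary	= PAGE_SIZE - 1,
};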
@@ -1681,7 +1681,6 @@ static struct scsi_host_template atp870u_template = {
.can_queue = qcnt /* can_queue */,
.this_id = 7 /* SCSI ID */,
.sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = ATP870U_MAX_SECTORS,
};

@@ -214,12 +214,6 @@ static char const *cqe_desc[] = {
"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
blk_queue_max_segment_size(sdev->request_queue, 65536);
return 0;
}

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;

@@ -393,7 +387,6 @@ static struct scsi_host_template beiscsi_sht = {
.proc_name = DRV_NAME,
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
.slave_configure = beiscsi_slave_configure,
.target_alloc = iscsi_target_alloc,
.eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = beiscsi_eh_abort,

@@ -404,8 +397,8 @@ static struct scsi_host_template beiscsi_sht = {
.can_queue = BE2_IO_DEPTH,
.this_id = -1,
.max_sectors = BEISCSI_MAX_SECTORS,
.max_segment_size = 65536,
.cmd_per_lun = BEISCSI_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
.track_queue_depth = 1,
};
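beiscsi's removed slave_configure callback existed only to cap the request queue's segment size at 64 KiB; the new `.max_segment_size` host-template field carries the same limit declaratively. The two forms side by side:

/* Old form: a per-device callback. */
static int old_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

/* New form: one template field, no callback needed. */
static struct scsi_host_template example_sht = {
	.max_segment_size = 65536,
};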
@@ -3819,7 +3819,7 @@ bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
sfp->state = BFA_SFP_STATE_REMOVED;
sfp->data_valid = 0;
bfa_sfp_scn_aen_post(sfp, rsp);
break;
break;
case BFA_SFP_SCN_FAILED:
sfp->state = BFA_SFP_STATE_FAILED;
sfp->data_valid = 0;

@@ -5763,7 +5763,7 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
(struct bfa_phy_stats_s *) phy->ubuf;
bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
sizeof(struct bfa_phy_stats_s));
bfa_trc(phy, stats->status);
bfa_trc(phy, stats->status);
}

phy->status = status;
@@ -739,14 +739,10 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)

pci_set_master(pdev);

if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
(pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
(pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
goto out_release_region;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
goto out_release_region;
}

/* Enable PCIE Advanced Error Recovery (AER) if kernel supports */

@@ -1565,9 +1561,9 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
pci_set_master(pdev);

if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
goto out_disable_device;
if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
goto out_disable_device;

if (restart_bfa(bfad) == -1)
goto out_disable_device;
@@ -817,7 +817,6 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = bfad_im_host_attrs,
.max_sectors = BFAD_MAX_SECTORS,
.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,

@@ -840,7 +839,6 @@ struct scsi_host_template bfad_im_vport_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = bfad_im_vport_attrs,
.max_sectors = BFAD_MAX_SECTORS,
};
@@ -2970,7 +2970,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
.max_sectors = 1024,
.track_queue_depth = 1,
@@ -2427,7 +2427,6 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
{
u32 cid_addr;
struct bnx2i_endpoint *ep;
u32 cid_num;

ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
if (!ep) {

@@ -2462,7 +2461,6 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
} else {
ep->state = EP_STATE_OFLD_COMPL;
cid_addr = ofld_kcqe->iscsi_conn_context_id;
cid_num = bnx2i_get_cid_num(ep);
ep->ep_cid = cid_addr;
ep->qp.ctx_base = NULL;
}

@@ -2263,7 +2263,6 @@ static struct scsi_host_template bnx2i_host_template = {
.max_sectors = 127,
.cmd_per_lun = 128,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
.shost_attrs = bnx2i_dev_attributes,
.track_queue_depth = 1,
@@ -255,7 +255,6 @@ static void
csio_hw_exit_workers(struct csio_hw *hw)
{
cancel_work_sync(&hw->evtq_work);
flush_scheduled_work();
}

static int

@@ -646,7 +645,7 @@ csio_shost_init(struct csio_hw *hw, struct device *dev,
if (csio_lnode_init(ln, hw, pln))
goto err_shost_put;

if (scsi_add_host(shost, dev))
if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
goto err_lnode_exit;

return ln;
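csio's switch to scsi_add_host_with_dma() keeps the lnode's device as the sysfs parent while pointing DMA mapping at the real PCI function. scsi_add_host(shost, dev) is simply the with-dma variant with the same device passed twice, so the change only affects which device the midlayer maps against:

/* scsi_add_host(shost, dev) == scsi_add_host_with_dma(shost, dev, dev);
 * passing the PCI device separately makes dma_map_*() operate on a
 * device that actually carries DMA parameters. */
if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
	goto err_lnode_exit;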
@@ -2274,7 +2274,6 @@ struct scsi_host_template csio_fcoe_shost_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_lport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};

@@ -2294,7 +2293,6 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = csio_fcoe_vport_attrs,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
@@ -95,7 +95,7 @@ static struct scsi_host_template cxgb3i_host_template = {
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
};

@@ -1,8 +1,8 @@
config SCSI_CXGB4_ISCSI
tristate "Chelsio T4 iSCSI support"
depends on PCI && INET && (IPV6 || IPV6=n)
select NETDEVICES
select ETHERNET
depends on THERMAL || !THERMAL
depends on ETHERNET
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB
@@ -113,7 +113,7 @@ static struct scsi_host_template cxgb4i_host_template = {
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
};

@@ -3174,7 +3174,6 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = 1, /* No scatter gather support */
.max_sectors = CXLFLASH_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = cxlflash_host_attrs,
.sdev_attrs = cxlflash_dev_attrs,
};
@@ -4631,7 +4631,7 @@ static struct scsi_host_template dc395x_driver_template = {
.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
.eh_abort_handler = dc395x_eh_abort,
.eh_bus_reset_handler = dc395x_eh_bus_reset,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
};

@@ -63,7 +63,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
};
@@ -934,15 +934,15 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
* See if we should enable dma64 mode.
*/
if (sizeof(dma_addr_t) > 4 &&
pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
dma64 = 1;
}
if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
dma64 = 1;

if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
return -EINVAL;

/* adapter only supports message blocks below 4GB */
pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

base_addr0_phys = pci_resource_start(pDev,0);
hba_map0_area_size = pci_resource_len(pDev,0);

@@ -3569,7 +3569,6 @@ static struct scsi_host_template driver_template = {
.slave_configure = adpt_slave_configure,
.can_queue = MAX_TO_IOP_MESSAGES,
.this_id = 7,
.use_clustering = ENABLE_CLUSTERING,
};

static int __init adpt_init(void)
@@ -266,6 +266,7 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
int i;
void *next_uncached;
struct esas2r_request *first_request, *last_request;
bool dma64 = false;

if (index >= MAX_ADAPTERS) {
esas2r_log(ESAS2R_LOG_CRIT,

@@ -286,42 +287,20 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
a->pcid = pcid;
a->host = host;

if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask = dma_get_required_mask
(&pcid->dev);
if (required_mask > DMA_BIT_MASK(32)
&& !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(pcid,
DMA_BIT_MASK(64))) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"64-bit PCI addressing enabled\n");
} else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(pcid,
DMA_BIT_MASK(32))) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"32-bit PCI addressing enabled\n");
} else {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to set DMA mask");
esas2r_kill_adapter(index);
return 0;
}
} else {
if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(pcid,
DMA_BIT_MASK(32))) {
esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev),
"32-bit PCI addressing enabled\n");
} else {
esas2r_log(ESAS2R_LOG_CRIT,
"failed to set DMA mask");
esas2r_kill_adapter(index);
return 0;
}
if (sizeof(dma_addr_t) > 4 &&
dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
!dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
dma64 = true;

if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
esas2r_kill_adapter(index);
return 0;
}

esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
"%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");

esas2r_adapters[index] = a;
sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
esas2r_debug("new adapter %p, name %s", a, a->name);
@@ -250,7 +250,6 @@ static struct scsi_host_template driver_template = {
ESAS2R_DEFAULT_CMD_PER_LUN,
.present = 0,
.unchecked_isa_dma = 0,
.use_clustering = ENABLE_CLUSTERING,
.emulated = 0,
.proc_name = ESAS2R_DRVR_NAME,
.change_queue_depth = scsi_change_queue_depth,

@@ -2676,7 +2676,6 @@ struct scsi_host_template scsi_esp_template = {
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xffff,
.skip_settle_delay = 1,
};
@@ -286,7 +286,6 @@ static struct scsi_host_template fcoe_shost_template = {
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.track_queue_depth = 1,

@@ -1670,7 +1669,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_stats *stats;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_port *port;
struct fcoe_hdr *hp;

fr = fcoe_dev_from_skb(skb);

@@ -1688,7 +1686,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_end_pointer(skb), skb->csum,
skb->dev ? skb->dev->name : "<NULL>");

port = lport_priv(lport);
skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */

/*

@@ -1859,7 +1856,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fcoe_port *port;
struct fc_stats *stats;
u32 link_possible = 1;
u32 mfs;

@@ -1897,7 +1893,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
port = lport_priv(ctlr->lp);
fcoe_vport_remove(lport);
mutex_lock(&fcoe_config_mutex);
fcoe_if_destroy(lport);
@@ -115,7 +115,6 @@ static struct scsi_host_template fnic_host_template = {
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FNIC_DFLT_IO_REQ,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
.shost_attrs = fnic_attrs,

@@ -468,14 +468,13 @@ int fnic_trace_buf_init(void)
fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
FNIC_ENTRY_SIZE_BYTES;

fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
if (!fnic_trace_buf_p) {
printk(KERN_ERR PFX "Failed to allocate memory "
"for fnic_trace_buf_p\n");
err = -ENOMEM;
goto err_fnic_trace_buf_init;
}
memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));

fnic_trace_entries.page_offset =
vmalloc(array_size(fnic_max_trace_entries,
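fnic's trace buffer moves from vmalloc() plus memset() to vzalloc(), which returns already-zeroed, virtually contiguous memory (the complete commit also deletes the memset() shown above). Minimal sketch:

#include <linux/vmalloc.h>

static void *example_zeroed_buf(size_t size)
{
	return vzalloc(size);	/* zeroed; no follow-up memset() needed */
}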
@@ -700,7 +700,7 @@ static struct scsi_host_template driver_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
.max_sectors = 128,
};

@@ -4680,7 +4680,6 @@ static struct scsi_host_template gdth_template = {
.sg_tablesize = GDTH_MAXSG,
.cmd_per_lun = GDTH_MAXC_P_L,
.unchecked_isa_dma = 1,
.use_clustering = ENABLE_CLUSTERING,
.no_write_same = 1,
};

@@ -184,7 +184,7 @@ static struct scsi_host_template gvp11_scsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.use_clustering = DISABLE_CLUSTERING,
.dma_boundary = PAGE_SIZE - 1,
};

static int check_wd33c93(struct gvp11_scsiregs *regs)
@@ -69,6 +69,12 @@
#define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8
#define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10

#define HISI_SAS_DIF_PROT_MASK (SHOST_DIF_TYPE1_PROTECTION | \
SHOST_DIF_TYPE2_PROTECTION | \
SHOST_DIF_TYPE3_PROTECTION)

#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK)

struct hisi_hba;

enum {

@@ -211,7 +217,7 @@ struct hisi_sas_slot {
/* Do not reorder/change members after here */
void *buf;
dma_addr_t buf_dma;
int idx;
u16 idx;
};

struct hisi_sas_hw {

@@ -268,6 +274,8 @@ struct hisi_hba {
struct pci_dev *pci_dev;
struct device *dev;

int prot_mask;

void __iomem *regs;
void __iomem *sgpio_regs;
struct regmap *ctrl;

@@ -322,6 +330,8 @@ struct hisi_hba {
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
u32 phy_state;
u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
u32 intr_coal_count; /* Interrupt count to coalesce */
};

/* Generic HW DMA host memory structures */

@@ -468,7 +478,6 @@ extern int hisi_sas_remove(struct platform_device *pdev);
extern int hisi_sas_slave_configure(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern struct device_attribute *host_attrs[];
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
@@ -296,42 +296,109 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
device_id, abort_flag, tag_to_abort);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
struct sas_task *task, int n_elem,
int n_elem_req, int n_elem_resp)
{
struct device *dev = hisi_hba->dev;

if (!sas_protocol_ata(task->task_proto)) {
if (task->num_scatter) {
if (n_elem)
dma_unmap_sg(dev, task->scatter,
task->num_scatter,
task->data_dir);
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
if (n_elem_req)
dma_unmap_sg(dev, &task->smp_task.smp_req,
1, DMA_TO_DEVICE);
if (n_elem_resp)
dma_unmap_sg(dev, &task->smp_task.smp_resp,
1, DMA_FROM_DEVICE);
}
}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
struct sas_task *task, int *n_elem,
int *n_elem_req, int *n_elem_resp)
{
struct device *dev = hisi_hba->dev;
int rc;

if (sas_protocol_ata(task->task_proto)) {
*n_elem = task->num_scatter;
} else {
unsigned int req_len, resp_len;

if (task->num_scatter) {
*n_elem = dma_map_sg(dev, task->scatter,
task->num_scatter, task->data_dir);
if (!*n_elem) {
rc = -ENOMEM;
goto prep_out;
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
1, DMA_TO_DEVICE);
if (!*n_elem_req) {
rc = -ENOMEM;
goto prep_out;
}
req_len = sg_dma_len(&task->smp_task.smp_req);
if (req_len & 0x3) {
rc = -EINVAL;
goto err_out_dma_unmap;
}
*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
1, DMA_FROM_DEVICE);
if (!*n_elem_resp) {
rc = -ENOMEM;
goto err_out_dma_unmap;
}
resp_len = sg_dma_len(&task->smp_task.smp_resp);
if (resp_len & 0x3) {
rc = -EINVAL;
goto err_out_dma_unmap;
}
}
}

if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
*n_elem);
rc = -EINVAL;
goto err_out_dma_unmap;
}
return 0;

err_out_dma_unmap:
/* It would be better to call dma_unmap_sg() here, but it's messy */
hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
*n_elem_req, *n_elem_resp);
prep_out:
return rc;
}

static int hisi_sas_task_prep(struct sas_task *task,
struct hisi_sas_dq **dq_pointer,
bool is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
{
struct domain_device *device = task->dev;
struct hisi_hba *hisi_hba;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
struct device *dev;
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
struct hisi_sas_dq *dq;
unsigned long flags;
int wr_q_index;

if (!sas_port) {
struct task_status_struct *ts = &task->task_status;

ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
/*
* libsas will use dev->port, should
* not call task_done for sata
*/
if (device->dev_type != SAS_SATA_DEV)
task->task_done(task);
return -ECOMM;
}

hisi_hba = dev_to_hisi_hba(device);
dev = hisi_hba->dev;

if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
dev_info(dev, "task prep: device %d not ready\n",
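The two helpers introduced above keep mapping and unmapping symmetrical, so task_prep's error handling (next hunk) collapses to one call per direction. The pairing, condensed from the diff itself:

/* map up front ... */
rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
		      &n_elem_req, &n_elem_resp);
if (rc < 0)
	goto prep_out;

/* ... and on any later failure, one matching unmap: */
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req, n_elem_resp);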
@@ -355,49 +422,10 @@ static int hisi_sas_task_prep(struct sas_task *task,
return -ECOMM;
}

if (!sas_protocol_ata(task->task_proto)) {
unsigned int req_len, resp_len;

if (task->num_scatter) {
n_elem = dma_map_sg(dev, task->scatter,
task->num_scatter, task->data_dir);
if (!n_elem) {
rc = -ENOMEM;
goto prep_out;
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
1, DMA_TO_DEVICE);
if (!n_elem_req) {
rc = -ENOMEM;
goto prep_out;
}
req_len = sg_dma_len(&task->smp_task.smp_req);
if (req_len & 0x3) {
rc = -EINVAL;
goto err_out_dma_unmap;
}
n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
1, DMA_FROM_DEVICE);
if (!n_elem_resp) {
rc = -ENOMEM;
goto err_out_dma_unmap;
}
resp_len = sg_dma_len(&task->smp_task.smp_resp);
if (resp_len & 0x3) {
rc = -EINVAL;
goto err_out_dma_unmap;
}
}
} else
n_elem = task->num_scatter;

if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
n_elem);
rc = -EINVAL;
goto err_out_dma_unmap;
}
rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
&n_elem_req, &n_elem_resp);
if (rc < 0)
goto prep_out;

if (hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);

@@ -482,19 +510,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
err_out_tag:
hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dma_unmap:
if (!sas_protocol_ata(task->task_proto)) {
if (task->num_scatter) {
dma_unmap_sg(dev, task->scatter, task->num_scatter,
task->data_dir);
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
if (n_elem_req)
dma_unmap_sg(dev, &task->smp_task.smp_req,
1, DMA_TO_DEVICE);
if (n_elem_resp)
dma_unmap_sg(dev, &task->smp_task.smp_resp,
1, DMA_FROM_DEVICE);
}
}
hisi_sas_dma_unmap(hisi_hba, task, n_elem,
n_elem_req, n_elem_resp);
prep_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
return rc;
@@ -506,10 +523,29 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
u32 rc;
u32 pass = 0;
unsigned long flags;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
struct device *dev = hisi_hba->dev;
struct hisi_hba *hisi_hba;
struct device *dev;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_dq *dq = NULL;

if (!sas_port) {
struct task_status_struct *ts = &task->task_status;

ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
/*
* libsas will use dev->port, should
* not call task_done for sata
*/
if (device->dev_type != SAS_SATA_DEV)
task->task_done(task);
return -ECOMM;
}

hisi_hba = dev_to_hisi_hba(device);
dev = hisi_hba->dev;

if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
if (in_softirq())
return -EINVAL;
@@ -1459,12 +1495,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
u16 tag = slot->idx;
int rc2;

int_to_scsilun(cmnd->device->lun, &lun);
tmf_task.tmf = TMF_ABORT_TASK;
tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
tmf_task.tag_of_task_to_be_managed = tag;

rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
&tmf_task);

@@ -1718,7 +1754,7 @@ static int hisi_sas_query_task(struct sas_task *task)

int_to_scsilun(cmnd->device->lun, &lun);
tmf_task.tmf = TMF_QUERY_TASK;
tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
tmf_task.tag_of_task_to_be_managed = tag;

rc = hisi_sas_debug_issue_ssp_tmf(device,
lun.scsi_lun,
@@ -1994,12 +2030,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

struct device_attribute *host_attrs[] = {
&dev_attr_phy_event_threshold,
NULL,
};
EXPORT_SYMBOL_GPL(host_attrs);

static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_dev_found = hisi_sas_dev_found,
.lldd_dev_gone = hisi_sas_dev_gone,

@@ -2380,7 +2410,6 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
if (hisi_hba->hw->slot_index_alloc) {
shost->can_queue = hisi_hba->hw->max_command_entries;
shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
@@ -510,6 +510,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;

memset(itct, 0, sizeof(*itct));

@@ -534,8 +535,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
itct->qw0 = cpu_to_le64(qw0);

/* qw1 */
memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
itct->sas_addr = __swab64(itct->sas_addr);
memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

/* qw2 */
itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) |

@@ -561,7 +562,7 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
reg_val &= ~CFG_AGING_TIME_ITCT_REL_MSK;
hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val);

qw0 = cpu_to_le64(itct->qw0);
qw0 = le64_to_cpu(itct->qw0);
qw0 &= ~ITCT_HDR_VALID_MSK;
itct->qw0 = cpu_to_le64(qw0);
}

@@ -1100,7 +1101,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SSP:
{
int error = -1;
u32 dma_err_type = cpu_to_le32(err_record->dma_err_type);
u32 dma_err_type = le32_to_cpu(err_record->dma_err_type);
u32 dma_tx_err_type = ((dma_err_type &
ERR_HDR_DMA_TX_ERR_TYPE_MSK)) >>
ERR_HDR_DMA_TX_ERR_TYPE_OFF;

@@ -1108,9 +1109,9 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
ERR_HDR_DMA_RX_ERR_TYPE_MSK)) >>
ERR_HDR_DMA_RX_ERR_TYPE_OFF;
u32 trans_tx_fail_type =
cpu_to_le32(err_record->trans_tx_fail_type);
le32_to_cpu(err_record->trans_tx_fail_type);
u32 trans_rx_fail_type =
cpu_to_le32(err_record->trans_rx_fail_type);
le32_to_cpu(err_record->trans_rx_fail_type);

if (dma_tx_err_type) {
/* dma tx err */

@@ -1558,7 +1559,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
u32 cmplt_hdr_data;

complete_hdr = &complete_queue[rd_point];
cmplt_hdr_data = cpu_to_le32(complete_hdr->data);
cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
idx = (cmplt_hdr_data & CMPLT_HDR_IPTT_MSK) >>
CMPLT_HDR_IPTT_OFF;
slot = &hisi_hba->slot_info[idx];

@@ -1797,6 +1798,11 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
return 0;
}

static struct device_attribute *host_attrs_v1_hw[] = {
&dev_attr_phy_event_threshold,
NULL
};

static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,

@@ -1808,14 +1814,13 @@ static struct scsi_host_template sht_v1_hw = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.this_id = -1,
.sg_tablesize = SG_ALL,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
.shost_attrs = host_attrs_v1_hw,
};

static const struct hisi_sas_hw hisi_sas_v1_hw = {
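All of the v1_hw fixes above point the endian accessors in the correct direction: descriptor fields are little-endian in memory, so reads need le32_to_cpu()/le64_to_cpu() and only writes use cpu_to_le32()/cpu_to_le64(). On little-endian hosts both directions happen to be no-ops, which is why the old code appeared to work; on big-endian hosts (and under sparse) it was wrong. The rule in two lines:

u32 data = le32_to_cpu(complete_hdr->data);	/* device -> CPU on read */
hdr->dw0  = cpu_to_le32(dw0);			/* CPU -> device on write */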
@@ -934,6 +934,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;

memset(itct, 0, sizeof(*itct));

@@ -966,8 +967,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
itct->qw0 = cpu_to_le64(qw0);

/* qw1 */
memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
itct->sas_addr = __swab64(itct->sas_addr);
memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

/* qw2 */
if (!dev_is_sata(device))

@@ -2044,11 +2045,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
struct task_status_struct *ts = &task->task_status;
struct hisi_sas_err_record_v2 *err_record =
hisi_sas_status_buf_addr_mem(slot);
u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type);
u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type);
u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
int error = -1;

if (err_phase == 1) {

@@ -2059,8 +2060,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
trans_tx_fail_type);
} else if (err_phase == 2) {
/* error in RX phase, the priority is: DW1 > DW3 > DW2 */
error = parse_trans_rx_err_code_v2_hw(
trans_rx_fail_type);
error = parse_trans_rx_err_code_v2_hw(trans_rx_fail_type);
if (error == -1) {
error = parse_dma_rx_err_code_v2_hw(
dma_rx_err_type);

@@ -2358,6 +2358,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
&complete_queue[slot->cmplt_queue_slot];
unsigned long flags;
bool is_internal = slot->is_internal;
u32 dw0;

if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;

@@ -2382,8 +2383,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}

/* Use SAS+TMF status codes */
switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
>> CMPLT_HDR_ABORT_STAT_OFF) {
dw0 = le32_to_cpu(complete_hdr->dw0);
switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >>
CMPLT_HDR_ABORT_STAT_OFF) {
case STAT_IO_ABORTED:
/* this io has been aborted by abort command */
ts->stat = SAS_ABORTED_TASK;

@@ -2408,9 +2410,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
break;
}

if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
if ((dw0 & CMPLT_HDR_ERX_MSK) && (!(dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
u32 err_phase = (dw0 & CMPLT_HDR_ERR_PHASE_MSK)
>> CMPLT_HDR_ERR_PHASE_OFF;
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

@@ -2526,22 +2527,23 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_tmf_task *tmf = slot->tmf;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
u32 dw0, dw1 = 0, dw2 = 0;

/* create header */
/* dw0 */
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
dw0 = port->id << CMD_HDR_PORT_OFF;
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
dw0 |= 3 << CMD_HDR_CMD_OFF;
else
hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
dw0 |= 4 << CMD_HDR_CMD_OFF;

if (tmf && tmf->force_phy) {
hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id)
<< CMD_HDR_PHY_ID_OFF);
dw0 |= CMD_HDR_FORCE_PHY_MSK;
dw0 |= (1 << tmf->phy_id) << CMD_HDR_PHY_ID_OFF;
}

hdr->dw0 = cpu_to_le32(dw0);

/* dw1 */
switch (task->data_dir) {
case DMA_TO_DEVICE:

@@ -3152,20 +3154,24 @@ static void cq_tasklet_v2_hw(unsigned long val)

/* Check for NCQ completion */
if (complete_hdr->act) {
u32 act_tmp = complete_hdr->act;
u32 act_tmp = le32_to_cpu(complete_hdr->act);
int ncq_tag_count = ffs(act_tmp);
u32 dw1 = le32_to_cpu(complete_hdr->dw1);

dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
dev_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >>
CMPLT_HDR_DEV_ID_OFF;
itct = &hisi_hba->itct[dev_id];

/* The NCQ tags are held in the itct header */
while (ncq_tag_count) {
__le64 *ncq_tag = &itct->qw4_15[0];
__le64 *_ncq_tag = &itct->qw4_15[0], __ncq_tag;
u64 ncq_tag;

ncq_tag_count -= 1;
iptt = (ncq_tag[ncq_tag_count / 5]
>> (ncq_tag_count % 5) * 12) & 0xfff;
ncq_tag_count--;
__ncq_tag = _ncq_tag[ncq_tag_count / 5];
ncq_tag = le64_to_cpu(__ncq_tag);
iptt = (ncq_tag >> (ncq_tag_count % 5) * 12) &
0xfff;

slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;

@@ -3176,7 +3182,9 @@ static void cq_tasklet_v2_hw(unsigned long val)
ncq_tag_count = ffs(act_tmp);
}
} else {
iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
u32 dw1 = le32_to_cpu(complete_hdr->dw1);

iptt = dw1 & CMPLT_HDR_IPTT_MSK;
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;

@@ -3552,6 +3560,11 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
dev_dbg(dev, "wait commands complete %dms\n", time);
}

static struct device_attribute *host_attrs_v2_hw[] = {
&dev_attr_phy_event_threshold,
NULL
};

static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,

@@ -3563,14 +3576,13 @@ static struct scsi_host_template sht_v2_hw = {
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.this_id = -1,
.sg_tablesize = SG_ALL,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
.shost_attrs = host_attrs_v2_hw,
};

static const struct hisi_sas_hw hisi_sas_v2_hw = {
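prep_ata_v2_hw() now accumulates dw0 in a CPU-order local and converts once when storing, instead of repeatedly OR-ing converted values into the __le32 field. The shape of the fix, condensed from the hunk above:

u32 dw0 = port->id << CMD_HDR_PORT_OFF;	/* build in CPU byte order */

dw0 |= 4 << CMD_HDR_CMD_OFF;
if (tmf && tmf->force_phy)
	dw0 |= CMD_HDR_FORCE_PHY_MSK;

hdr->dw0 = cpu_to_le32(dw0);		/* single conversion at the end */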
@@ -42,6 +42,7 @@
#define MAX_CON_TIME_LIMIT_TIME 0xa4
#define BUS_INACTIVE_LIMIT_TIME 0xa8
#define REJECT_TO_OPEN_LIMIT_TIME 0xac
#define CQ_INT_CONVERGE_EN 0xb0
#define CFG_AGING_TIME 0xbc
#define HGC_DFX_CFG2 0xc0
#define CFG_ABT_SET_QUERY_IPTT 0xd4

@@ -126,6 +127,8 @@
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF 8
#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
#define SL_CFG (PORT_BASE + 0x84)
#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)

@@ -332,6 +335,16 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)

struct hisi_sas_protect_iu_v3_hw {
u32 dw0;
u32 lbrtcv;
u32 lbrtgv;
u32 dw3;
u32 dw4;
u32 dw5;
u32 rsv;
};

struct hisi_sas_complete_v3_hdr {
__le32 dw0;
__le32 dw1;

@@ -371,6 +384,28 @@ struct hisi_sas_err_record_v3 {
((fis.command == ATA_CMD_DEV_RESET) && \
((fis.control & ATA_SRST) != 0)))

#define T10_INSRT_EN_OFF 0
#define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF)
#define T10_RMV_EN_OFF 1
#define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF)
#define T10_RPLC_EN_OFF 2
#define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF)
#define T10_CHK_EN_OFF 3
#define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF)
#define INCR_LBRT_OFF 5
#define INCR_LBRT_MSK (1 << INCR_LBRT_OFF)
#define USR_DATA_BLOCK_SZ_OFF 20
#define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF)
#define T10_CHK_MSK_OFF 16

static bool hisi_sas_intr_conv;
MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");

/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");

static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
void __iomem *regs = hisi_hba->regs + off;
@@ -436,6 +471,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
hisi_sas_intr_conv);
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);

@@ -494,7 +531,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);

hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);

@@ -622,6 +659,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;

memset(itct, 0, sizeof(*itct));

@@ -654,8 +692,8 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
itct->qw0 = cpu_to_le64(qw0);

/* qw1 */
memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
itct->sas_addr = __swab64(itct->sas_addr);
memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
itct->sas_addr = cpu_to_le64(__swab64(sas_addr));

/* qw2 */
if (!dev_is_sata(device))
@@ -932,6 +970,58 @@ static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
}

static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
{
unsigned char prot_flags = scsi_cmnd->prot_flags;

if (prot_flags & SCSI_PROT_TRANSFER_PI) {
if (prot_flags & SCSI_PROT_REF_CHECK)
return 0xc << 16;
return 0xfc << 16;
}
return 0;
}

static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
struct hisi_sas_protect_iu_v3_hw *prot)
{
unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
unsigned int interval = scsi_prot_interval(scsi_cmnd);
u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);

switch (prot_op) {
case SCSI_PROT_READ_STRIP:
prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
prot->lbrtcv = lbrt_chk_val;
prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
break;
case SCSI_PROT_WRITE_INSERT:
prot->dw0 |= T10_INSRT_EN_MSK;
prot->lbrtgv = lbrt_chk_val;
break;
default:
WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
break;
}

switch (interval) {
case 512:
break;
case 4096:
prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
break;
case 520:
prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
break;
default:
WARN(1, "protection interval (0x%x) invalid\n",
interval);
break;
}

prot->dw0 |= INCR_LBRT_MSK;
}

static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
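The protection-information plumbing above (HISI_SAS_DIF_PROT_MASK, the prot_mask module parameter, fill_prot_v3_hw()) feeds the midlayer's DIF registration. The registration call itself is outside the hunks quoted here; a hedged sketch of the usual pattern with scsi_host_set_prot(), under the assumption that the driver registers at host-attach time:

/* Hedged sketch: advertise T10 PI capabilities to the SCSI midlayer.
 * The exact call site in hisi_sas_v3_hw.c is not shown in the hunks
 * above. */
if (hisi_hba->prot_mask) {
	dev_info(dev, "Registering for DIF prot_mask=0x%x\n",
		 prot_mask);
	scsi_host_set_prot(hisi_hba->shost, prot_mask);
}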
@@ -943,9 +1033,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
struct hisi_sas_tmf_task *tmf = slot->tmf;
unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
u32 dw1 = 0, dw2 = 0, len = 0;

hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
(2 << CMD_HDR_TLR_CTRL_OFF) |

@@ -975,7 +1066,6 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,

/* map itct entry */
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);

dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+ 3) / 4) << CMD_HDR_CFL_OFF) |

@@ -988,7 +1078,6 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
slot->n_elem);

hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

@@ -1013,6 +1102,38 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
break;
}
}

if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
struct hisi_sas_protect_iu_v3_hw prot;
u8 *buf_cmd_prot;

hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
dw1 |= CMD_HDR_PIR_MSK;
buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
sizeof(struct ssp_frame_hdr) +
sizeof(struct ssp_command_iu);

memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
fill_prot_v3_hw(scsi_cmnd, &prot);
memcpy(buf_cmd_prot, &prot,
sizeof(struct hisi_sas_protect_iu_v3_hw));

/*
* For READ, we need length of info read to memory, while for
* WRITE we need length of data written to the disk.
*/
if (prot_op == SCSI_PROT_WRITE_INSERT) {
unsigned int interval = scsi_prot_interval(scsi_cmnd);
unsigned int ilog2_interval = ilog2(interval);

len = (task->total_xfer_len >> ilog2_interval) * 8;
}

}

hdr->dw1 = cpu_to_le32(dw1);

hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
}

static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
@@ -1584,15 +1705,16 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
&complete_queue[slot->cmplt_queue_slot];
struct hisi_sas_err_record_v3 *record =
hisi_sas_status_buf_addr_mem(slot);
u32 dma_rx_err_type = record->dma_rx_err_type;
u32 trans_tx_fail_type = record->trans_tx_fail_type;
u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);

switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
ts->stat = SAS_QUEUE_FULL;
slot->abort = 1;
} else {

@@ -1606,7 +1728,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
ts->stat = SAS_PHY_DOWN;
slot->abort = 1;
} else {

@@ -1639,6 +1761,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
&complete_queue[slot->cmplt_queue_slot];
unsigned long flags;
bool is_internal = slot->is_internal;
u32 dw0, dw1, dw3;

if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;

@@ -1662,11 +1785,14 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
goto out;
}

dw0 = le32_to_cpu(complete_hdr->dw0);
dw1 = le32_to_cpu(complete_hdr->dw1);
dw3 = le32_to_cpu(complete_hdr->dw3);

/*
* Use SAS+TMF status codes
*/
switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
>> CMPLT_HDR_ABORT_STAT_OFF) {
switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
case STAT_IO_ABORTED:
/* this IO has been aborted by abort command */
ts->stat = SAS_ABORTED_TASK;

@@ -1689,7 +1815,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}

/* check for erroneous completion */
if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);

slot_err_v3_hw(hisi_hba, task, slot);

@@ -1698,8 +1824,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
"Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task, sas_dev->device_id,
complete_hdr->dw0, complete_hdr->dw1,
complete_hdr->act, complete_hdr->dw3,
dw0, dw1, complete_hdr->act, dw3,
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort))
@ -1797,11 +1922,13 @@ static void cq_tasklet_v3_hw(unsigned long val)
|
|||
while (rd_point != wr_point) {
|
||||
struct hisi_sas_complete_v3_hdr *complete_hdr;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
u32 dw1;
|
||||
int iptt;
|
||||
|
||||
complete_hdr = &complete_queue[rd_point];
|
||||
dw1 = le32_to_cpu(complete_hdr->dw1);
|
||||
|
||||
iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
|
||||
iptt = dw1 & CMPLT_HDR_IPTT_MSK;
|
||||
if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
|
||||
slot = &hisi_hba->slot_info[iptt];
|
||||
slot->cmplt_queue_slot = rd_point;
|
||||
|
@@ -1878,10 +2005,12 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
 		struct tasklet_struct *t = &cq->tasklet;
+		int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+		unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;

-		rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
-				      cq_interrupt_v3_hw, 0,
-				      DRV_NAME " cq", cq);
+		rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
+				      cq_interrupt_v3_hw, irqflags,
+				      DRV_NAME " cq", cq);
 		if (rc) {
 			dev_err(dev,
 				"could not request cq%d interrupt, rc=%d\n",
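Aside: the interrupt hunks in this file add an optional "converged" completion-queue interrupt mode. With the new hisi_sas_intr_conv switch set, every CQ is registered on MSI vector 16 with IRQF_SHARED, each registration passing its own cq as the cookie; otherwise each CQ keeps a private vector at 16 + i. Condensed from the hunk above, using the same names:

	int nr = hisi_sas_intr_conv ? 16 : 16 + i;
	unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;

	/* on the shared vector, the cq cookie distinguishes handlers
	 * and must be passed identically to free_irq() later */
	rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
			      cq_interrupt_v3_hw, irqflags,
			      DRV_NAME " cq", cq);

The error-unwind and destroy hunks below recompute the same nr so that request and free stay symmetric.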
@@ -1898,8 +2027,9 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 free_cq_irqs:
 	for (k = 0; k < i; k++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
+		int nr = hisi_sas_intr_conv ? 16 : 16 + k;

-		free_irq(pci_irq_vector(pdev, k+16), cq);
+		free_irq(pci_irq_vector(pdev, nr), cq);
 	}
 	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
free_chnl_interrupt:
@@ -2089,6 +2219,119 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
 	dev_dbg(dev, "wait commands complete %dms\n", time);
 }

+static ssize_t intr_conv_v3_hw_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
+}
+static DEVICE_ATTR_RO(intr_conv_v3_hw);
+
+static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
+{
+	/* config those registers between enable and disable PHYs */
+	hisi_sas_stop_phys(hisi_hba);
+
+	if (hisi_hba->intr_coal_ticks == 0 ||
+	    hisi_hba->intr_coal_count == 0) {
+		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+	} else {
+		hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
+				 hisi_hba->intr_coal_ticks);
+		hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
+				 hisi_hba->intr_coal_count);
+	}
+	phys_init_v3_hw(hisi_hba);
+}
+
+static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 hisi_hba->intr_coal_ticks);
+}
+
+static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+	u32 intr_coal_ticks;
+	int ret;
+
+	ret = kstrtou32(buf, 10, &intr_coal_ticks);
+	if (ret) {
+		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
+		return -EINVAL;
+	}
+
+	if (intr_coal_ticks >= BIT(24)) {
+		dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
+		return -EINVAL;
+	}
+
+	hisi_hba->intr_coal_ticks = intr_coal_ticks;
+
+	config_intr_coal_v3_hw(hisi_hba);
+
+	return count;
+}
+static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);
+
+static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
+					  struct device_attribute
+					  *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 hisi_hba->intr_coal_count);
+}
+
+static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
+					   struct device_attribute
+					   *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct hisi_hba *hisi_hba = shost_priv(shost);
+	u32 intr_coal_count;
+	int ret;
+
+	ret = kstrtou32(buf, 10, &intr_coal_count);
+	if (ret) {
+		dev_err(dev, "Input data of interrupt coalesce unmatch\n");
+		return -EINVAL;
+	}
+
+	if (intr_coal_count >= BIT(8)) {
+		dev_err(dev, "intr_coal_count must be less than 2^8!\n");
+		return -EINVAL;
+	}
+
+	hisi_hba->intr_coal_count = intr_coal_count;
+
+	config_intr_coal_v3_hw(hisi_hba);
+
+	return count;
+}
+static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
+
+static struct device_attribute *host_attrs_v3_hw[] = {
+	&dev_attr_phy_event_threshold,
+	&dev_attr_intr_conv_v3_hw,
+	&dev_attr_intr_coal_ticks_v3_hw,
+	&dev_attr_intr_coal_count_v3_hw,
+	NULL
+};
+
 static struct scsi_host_template sht_v3_hw = {
 	.name = DRV_NAME,
 	.module = THIS_MODULE,
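Aside: once the driver is loaded, these attributes surface under the SCSI host's sysfs directory. Assuming the HBA is host0 (the host number varies per system), tuning looks like:

	cat /sys/class/scsi_host/host0/intr_conv_v3_hw
	echo 500 > /sys/class/scsi_host/host0/intr_coal_ticks_v3_hw
	echo 32 > /sys/class/scsi_host/host0/intr_coal_count_v3_hw

Writing 0 to either coalescing knob falls back to the uncoalesced defaults, per config_intr_coal_v3_hw() above; intr_conv_v3_hw itself is read-only because converged mode is chosen at module load.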
@@ -2100,14 +2343,13 @@ static struct scsi_host_template sht_v3_hw = {
 	.change_queue_depth = sas_change_queue_depth,
 	.bios_param = sas_bios_param,
 	.this_id = -1,
-	.sg_tablesize = SG_ALL,
+	.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
 	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
-	.use_clustering = ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_target_reset_handler = sas_eh_target_reset_handler,
 	.target_destroy = sas_target_destroy,
 	.ioctl = sas_ioctl,
-	.shost_attrs = host_attrs,
+	.shost_attrs = host_attrs_v3_hw,
 	.tag_alloc_policy = BLK_TAG_ALLOC_RR,
 };
@@ -2161,6 +2403,12 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
 	hisi_hba->shost = shost;
 	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

+	if (prot_mask & ~HISI_SAS_PROT_MASK)
+		dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
+			prot_mask);
+	else
+		hisi_hba->prot_mask = prot_mask;
+
 	timer_setup(&hisi_hba->timer, NULL, 0);

 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
@@ -2199,14 +2447,11 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		goto err_out_disable_device;

-	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
-	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
-		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
-		    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
-			dev_err(dev, "No usable DMA addressing method\n");
-			rc = -EIO;
-			goto err_out_regions;
-		}
-	}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		dev_err(dev, "No usable DMA addressing method\n");
+		rc = -EIO;
+		goto err_out_regions;
+	}

 	shost = hisi_sas_shost_alloc_pci(pdev);
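Aside: note the short-circuit behavior of the converted condition. dma_set_mask_and_coherent() returns 0 on success, so in `a || b` the 32-bit call is only reached when the 64-bit call succeeds (needlessly narrowing the mask to 32 bits), while a 64-bit failure takes the error path without trying 32-bit at all. A fallback matching the deleted code's intent looks like this (a sketch; rc and err_out_regions as in the hunk above):

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_out_regions;
	}

The hptiop hunk further down has the same `||` shape; these call sites were reworked along these lines by follow-up fixes in the next release cycle.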
@@ -2245,7 +2490,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	shost->max_lun = ~0;
 	shost->max_channel = 1;
 	shost->max_cmd_len = 16;
-	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
 	shost->can_queue = hisi_hba->hw->max_command_entries -
 		HISI_SAS_RESERVED_IPTT_CNT;
 	shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
@@ -2275,6 +2519,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		goto err_out_register_ha;

+	if (hisi_hba->prot_mask) {
+		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+			 prot_mask);
+		scsi_host_set_prot(hisi_hba->shost, prot_mask);
+	}
+
 	scsi_scan_host(shost);

 	return 0;
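Aside: the two protection-information hunks in this file cooperate: shost allocation validates and stores the mask, and probe advertises it to the midlayer. Condensed (HISI_SAS_PROT_MASK and the prot_mask module parameter are defined elsewhere in the driver, outside this diff):

	if (prot_mask & ~HISI_SAS_PROT_MASK)	/* reject unknown bits */
		dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
			prot_mask);
	else
		hisi_hba->prot_mask = prot_mask;

	/* ... later, after the SAS HA is registered ... */
	if (hisi_hba->prot_mask)
		scsi_host_set_prot(hisi_hba->shost, prot_mask);

scsi_host_set_prot() records the DIF/DIX capability bits on the Scsi_Host so the midlayer can set up protection data for capable devices.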
@@ -2301,8 +2551,9 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
 	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+		int nr = hisi_sas_intr_conv ? 16 : 16 + i;

-		free_irq(pci_irq_vector(pdev, i+16), cq);
+		free_irq(pci_irq_vector(pdev, nr), cq);
 	}
 	pci_free_irq_vectors(pdev);
 }
@@ -2529,7 +2780,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 	struct device *dev = hisi_hba->dev;
 	struct Scsi_Host *shost = hisi_hba->shost;
-	u32 device_state;
+	pci_power_t device_state;
 	int rc;

 	if (!pdev->pm_cap) {
@@ -2575,7 +2826,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
 	struct Scsi_Host *shost = hisi_hba->shost;
 	struct device *dev = hisi_hba->dev;
 	unsigned int rc;
-	u32 device_state = pdev->current_state;
+	pci_power_t device_state = pdev->current_state;

 	dev_warn(dev, "resuming from operating state [D%d]\n",
 		 device_state);
@@ -2624,6 +2875,7 @@ static struct pci_driver sas_v3_pci_driver = {
 };

 module_pci_driver(sas_v3_pci_driver);
+module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);

 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
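Aside: the new module parameter selects converged interrupt mode at load time; assuming the module name follows the source file, that is:

	modprobe hisi_sas_v3_hw intr_conv=1

The 0444 permissions expose it read-only under /sys/module/.../parameters/, consistent with the read-only intr_conv_v3_hw host attribute added earlier in this file.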
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
@@ -416,7 +416,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->sg_prot_tablesize = sht->sg_prot_tablesize;
 	shost->cmd_per_lun = sht->cmd_per_lun;
 	shost->unchecked_isa_dma = sht->unchecked_isa_dma;
-	shost->use_clustering = sht->use_clustering;
 	shost->no_write_same = sht->no_write_same;

 	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
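Aside: this hunk and the .use_clustering deletions in the driver templates below are the visible end of the DISABLE_CLUSTERING removal mentioned in the merge description: segment merging is now always allowed, and drivers that cannot tolerate merged segments express that through DMA limits instead. An illustrative template fragment (the fields are real scsi_host_template members; the values are examples, not taken from this diff):

	static struct scsi_host_template demo_tmpl = {
		.name = "demo",
		/* .use_clustering is gone; merging is unconditional.
		 * A driver with per-segment limits sets these instead: */
		.max_segment_size = 65536,
		.dma_boundary = PAGE_SIZE - 1,
	};

The next hosts.c hunk wires sht->max_segment_size through to the host so the block layer can honor it.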
@@ -449,6 +448,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	else
 		shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;

+	if (sht->max_segment_size)
+		shost->max_segment_size = sht->max_segment_size;
+	else
+		shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
+
 	/*
 	 * assume a 4GB boundary, if not set
 	 */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
@@ -965,7 +965,6 @@ static struct scsi_host_template hpsa_driver_template = {
 	.scan_finished = hpsa_scan_finished,
 	.change_queue_depth = hpsa_change_queue_depth,
 	.this_id = -1,
-	.use_clustering = ENABLE_CLUSTERING,
 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 	.ioctl = hpsa_ioctl,
 	.slave_alloc = hpsa_slave_alloc,
@@ -4663,6 +4662,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
 	case WRITE_6:
 	case WRITE_12:
 		is_write = 1;
+		/* fall through */
 	case READ_6:
 	case READ_12:
 		if (*cdb_len == 6) {
@@ -5093,6 +5093,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 	switch (cmd->cmnd[0]) {
 	case WRITE_6:
 		is_write = 1;
+		/* fall through */
 	case READ_6:
 		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
 				(cmd->cmnd[2] << 8) |
@@ -5103,6 +5104,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		break;
 	case WRITE_10:
 		is_write = 1;
+		/* fall through */
 	case READ_10:
 		first_block =
 			(((u64) cmd->cmnd[2]) << 24) |
@@ -5115,6 +5117,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		break;
 	case WRITE_12:
 		is_write = 1;
+		/* fall through */
 	case READ_12:
 		first_block =
 			(((u64) cmd->cmnd[2]) << 24) |
@@ -5129,6 +5132,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 		break;
 	case WRITE_16:
 		is_write = 1;
+		/* fall through */
 	case READ_16:
 		first_block =
 			(((u64) cmd->cmnd[2]) << 56) |
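Aside: these hpsa hunks belong to the tree-wide -Wimplicit-fallthrough annotation effort (see also the isci and myrb entries in the shortlog): a /* fall through */ comment placed exactly where the break is intentionally missing tells GCC the fall-through is deliberate. Sketch of the idiom (parse_lba_6 is a hypothetical helper):

	switch (opcode) {
	case WRITE_6:
		is_write = 1;
		/* fall through */	/* silences -Wimplicit-fallthrough */
	case READ_6:
		parse_lba_6(cdb);
		break;
	}

Later kernels replaced these comments with the fallthrough; pseudo-keyword, which compiles to __attribute__((__fallthrough__)) where the compiler supports it.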
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
@@ -1180,7 +1180,6 @@ static struct scsi_host_template driver_template = {
 	.eh_host_reset_handler = hptiop_reset,
 	.info = hptiop_info,
 	.emulated = 0,
-	.use_clustering = ENABLE_CLUSTERING,
 	.proc_name = driver_name,
 	.shost_attrs = hptiop_attrs,
 	.slave_configure = hptiop_slave_config,
@@ -1309,11 +1308,11 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)

 	/* Enable 64bit DMA if possible */
 	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
-	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
-		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
-			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
-			goto disable_pci_device;
-		}
-	}
+	if (dma_set_mask(&pcidev->dev,
+			 DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
+	    dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
+		printk(KERN_ERR "hptiop: fail to set dma_mask\n");
+		goto disable_pci_device;
+	}

 	if (pci_request_regions(pcidev, driver_name)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3100,7 +3100,6 @@ static struct scsi_host_template driver_template = {
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
 	.max_sectors = IBMVFC_MAX_SECTORS,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ibmvfc_attrs,
 	.track_queue_depth = 1,
 };
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2079,7 +2079,6 @@ static struct scsi_host_template driver_template = {
 	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ibmvscsi_attrs,
 };
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3695,11 +3695,6 @@ static int ibmvscsis_get_system_info(void)
 	return 0;
 }

-static char *ibmvscsis_get_fabric_name(void)
-{
-	return "ibmvscsis";
-}
-
 static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
 	struct ibmvscsis_tport *tport =
@@ -4044,9 +4039,8 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {

 static const struct target_core_fabric_ops ibmvscsis_ops = {
 	.module = THIS_MODULE,
-	.name = "ibmvscsis",
+	.fabric_name = "ibmvscsis",
 	.max_data_sg_nents = MAX_TXU / PAGE_SIZE,
-	.get_fabric_name = ibmvscsis_get_fabric_name,
 	.tpg_get_wwn = ibmvscsis_get_fabric_wwn,
 	.tpg_get_tag = ibmvscsis_get_tag,
 	.tpg_get_default_depth = ibmvscsis_get_default_depth,
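Aside: this pair of ibmvscsis hunks reflects one of the target-tree updates from the merge description: target_core_fabric_ops dropped the .get_fabric_name() callback in favor of a constant .fabric_name string, so every fabric's boilerplate accessor could be deleted. A minimal sketch of the conversion for a hypothetical fabric module:

	/* before: one trivial callback per fabric */
	static char *demo_get_fabric_name(void)
	{
		return "demo";
	}

	/* after: the core dereferences a string member directly */
	static const struct target_core_fabric_ops demo_ops = {
		.module      = THIS_MODULE,
		.fabric_name = "demo",
	};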
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
@@ -1110,7 +1110,6 @@ static struct scsi_host_template imm_template = {
 	.bios_param = imm_biosparam,
 	.this_id = 7,
 	.sg_tablesize = SG_ALL,
-	.use_clustering = ENABLE_CLUSTERING,
 	.can_queue = 1,
 	.slave_alloc = imm_adjust_queue,
 };
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
@@ -2817,7 +2817,6 @@ static struct scsi_host_template initio_template = {
 	.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
 	.this_id = 1,
 	.sg_tablesize = SG_ALL,
-	.use_clustering = ENABLE_CLUSTERING,
 };

 static int initio_probe_one(struct pci_dev *pdev,
@@ -2840,7 +2839,7 @@ static int initio_probe_one(struct pci_dev *pdev,
 		reg = 0;
 	bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));

-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
 		error = -ENODEV;
 		goto out_disable_device;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
@@ -6754,7 +6754,6 @@ static struct scsi_host_template driver_template = {
 	.sg_tablesize = IPR_MAX_SGLIST,
 	.max_sectors = IPR_IOA_MAX_SECTORS,
 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
-	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = ipr_ioa_attrs,
 	.sdev_attrs = ipr_dev_attrs,
 	.proc_name = IPR_NAME,
Some files were not shown because too many files have changed in this diff.