SCSI misc on 20211105

This series consists of the usual driver updates (ufs, smartpqi, lpfc,
 target, megaraid_sas, hisi_sas, qla2xxx) and minor updates and bug
 fixes.  Notable core changes are the removal of scsi->tag which caused
 some churn in obsolete drivers and a sweep through all drivers to call
 scsi_done() directly instead of scsi->done() which removes a pointer
 indirection from the hot path and a move to register core sysfs files
 earlier, which means they're available to KOBJ_ADD processing, which
 necessitates switching all drivers to using attribute groups.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYYUfBCYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishbUJAQDZt4oc
 vUx9JpyrdHxxTCuOzVFd8W1oJn0k5ltCBuz4yAD8DNbGhGm93raMSJ3FOOlzLEbP
 RG8vBdpxMudlvxAPi/A=
 =BSFz
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This consists of the usual driver updates (ufs, smartpqi, lpfc,
  target, megaraid_sas, hisi_sas, qla2xxx) and minor updates and bug
  fixes.

  Notable core changes are the removal of scsi->tag which caused some
  churn in obsolete drivers and a sweep through all drivers to call
  scsi_done() directly instead of scsi->done() which removes a pointer
  indirection from the hot path and a move to register core sysfs files
  earlier, which means they're available to KOBJ_ADD processing, which
  necessitates switching all drivers to using attribute groups"
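
The per-driver churn from those last two core changes follows one mechanical pattern. As a rough sketch only, for a hypothetical "foo" host driver (the driver name, the foo_stats attribute and the completion path below are invented for illustration, not taken from any driver in this series), the converted form looks like:

#include <linux/device.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical example attribute; any DEVICE_ATTR is handled the same way. */
static ssize_t foo_stats_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR_RO(foo_stats);

/*
 * Old style, removed by this series:
 *
 *     static struct device_attribute *foo_shost_attrs[] = {
 *             &dev_attr_foo_stats,
 *             NULL,
 *     };
 *     ...
 *     .shost_attrs = foo_shost_attrs,
 *     ...
 *     cmd->scsi_done(cmd);
 *
 * New style: a plain attribute array wrapped in an attribute group.
 * ATTRIBUTE_GROUPS() generates the foo_shost_groups[] pointer array.
 */
static struct attribute *foo_shost_attrs[] = {
        &dev_attr_foo_stats.attr,
        NULL,
};
ATTRIBUTE_GROUPS(foo_shost);

static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        cmd->result = DID_OK << 16;
        scsi_done(cmd);         /* direct call replaces cmd->scsi_done(cmd) */
        return 0;
}

static struct scsi_host_template foo_template = {
        .module         = THIS_MODULE,
        .name           = "foo",
        .queuecommand   = foo_queuecommand,
        .shost_groups   = foo_shost_groups,     /* was .shost_attrs */
        .this_id        = -1,
};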

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
  scsi: lpfc: Update lpfc version to 14.0.0.3
  scsi: lpfc: Allow fabric node recovery if recovery is in progress before devloss
  scsi: lpfc: Fix link down processing to address NULL pointer dereference
  scsi: lpfc: Allow PLOGI retry if previous PLOGI was aborted
  scsi: lpfc: Fix use-after-free in lpfc_unreg_rpi() routine
  scsi: lpfc: Correct sysfs reporting of loop support after SFP status change
  scsi: lpfc: Wait for successful restart of SLI3 adapter during host sg_reset
  scsi: lpfc: Revert LOG_TRACE_EVENT back to LOG_INIT prior to driver_resource_setup()
  scsi: ufs: ufshcd-pltfrm: Fix memory leak due to probe defer
  scsi: ufs: mediatek: Avoid sched_clock() misuse
  scsi: mpt3sas: Make mpt3sas_dev_attrs static
  scsi: scsi_transport_sas: Add 22.5 Gbps link rate definitions
  scsi: target: core: Stop using bdevname()
  scsi: aha1542: Use memcpy_{from,to}_bvec()
  scsi: sr: Add error handling support for add_disk()
  scsi: sd: Add error handling support for add_disk()
  scsi: target: Perform ALUA group changes in one step
  scsi: target: Replace lun_tg_pt_gp_lock with rcu in I/O path
  scsi: target: Fix alua_tg_pt_gps_count tracking
  scsi: target: Fix ordered tag handling
  ...
Linus Torvalds 2021-11-05 08:42:02 -07:00
commit fe91c4725a
240 changed files with 4171 additions and 2787 deletions


@ -0,0 +1,27 @@
What: /sys/class/fc/fc_udev_device/appid_store
Date: Aug 2021
Contact: Muneendra Kumar <muneendra.kumar@broadcom.com>
Description:
This interface allows an admin to set an FC application
identifier in the blkcg associated with a cgroup id. The
identifier is typically a UUID that is associated with
an application or logical entity such as a virtual
machine or container group. The application or logical
entity utilizes a block device via the cgroup id.
FC adapter drivers may query the identifier and tag FC
traffic based on the identifier. FC host and FC fabric
entities can utilize the application id and FC traffic
tag to identify traffic sources.
The interface expects a string "<cgroupid>:<appid>" where:
<cgroupid> is inode of the cgroup in hexadecimal
<appid> is a user provided string up to 128 characters
in length.
If an appid_store is done for a cgroup id that already
has an appid set, the new value will override the
previous value.
If an admin wants to remove an FC application identifier
from a cgroup, an appid_store should be done with the
following string: "<cgroupid>:"
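
For illustration only, a minimal userspace sketch of driving this interface (the cgroup path and application id below are made-up examples, and error handling is kept to the bare minimum):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;
        FILE *f;

        /* <cgroupid> is the inode number of the cgroup directory, in hex. */
        if (stat("/sys/fs/cgroup/machine.slice/vm1", &st) != 0)
                return 1;

        f = fopen("/sys/class/fc/fc_udev_device/appid_store", "w");
        if (!f)
                return 1;

        /* "<cgroupid>:<appid>"; writing "<cgroupid>:" clears the appid. */
        fprintf(f, "%llx:vm1-app-id",
                (unsigned long long)st.st_ino);

        return fclose(f) != 0;
}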


@ -983,7 +983,7 @@ Description: This file shows the amount of data that the host plans to
What: /sys/class/scsi_device/*/device/dyn_cap_needed
Date: February 2018
Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
Description: This file shows the The amount of physical memory needed
Description: This file shows the amount of physical memory needed
to be removed from the physical memory resources pool of
the particular logical unit. The full information about
the attribute could be found at UFS specifications 2.1.


@ -376,8 +376,8 @@ struct ahci_host_priv {
extern int ahci_ignore_sss;
extern struct device_attribute *ahci_shost_attrs[];
extern struct device_attribute *ahci_sdev_attrs[];
extern const struct attribute_group *ahci_shost_groups[];
extern const struct attribute_group *ahci_sdev_groups[];
/*
* This must be instantiated by the edge drivers. Read the comments
@ -388,8 +388,8 @@ extern struct device_attribute *ahci_sdev_attrs[];
.can_queue = AHCI_MAX_CMDS, \
.sg_tablesize = AHCI_MAX_SG, \
.dma_boundary = AHCI_DMA_BOUNDARY, \
.shost_attrs = ahci_shost_attrs, \
.sdev_attrs = ahci_sdev_attrs, \
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.slave_configure = ata_scsi_slave_config


@ -1085,14 +1085,16 @@ static struct ata_port_operations ich_pata_ops = {
.set_dmamode = ich_set_dmamode,
};
static struct device_attribute *piix_sidpr_shost_attrs[] = {
&dev_attr_link_power_management_policy,
static struct attribute *piix_sidpr_shost_attrs[] = {
&dev_attr_link_power_management_policy.attr,
NULL
};
ATTRIBUTE_GROUPS(piix_sidpr_shost);
static struct scsi_host_template piix_sidpr_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.shost_attrs = piix_sidpr_shost_attrs,
.shost_groups = piix_sidpr_shost_groups,
};
static struct ata_port_operations piix_sidpr_sata_ops = {


@ -108,28 +108,46 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
&dev_attr_ahci_host_caps,
&dev_attr_ahci_host_cap2,
&dev_attr_ahci_host_version,
&dev_attr_ahci_port_cmd,
&dev_attr_em_buffer,
&dev_attr_em_message_supported,
static struct attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy.attr,
&dev_attr_em_message_type.attr,
&dev_attr_em_message.attr,
&dev_attr_ahci_host_caps.attr,
&dev_attr_ahci_host_cap2.attr,
&dev_attr_ahci_host_version.attr,
&dev_attr_ahci_port_cmd.attr,
&dev_attr_em_buffer.attr,
&dev_attr_em_message_supported.attr,
NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_attrs);
struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
&dev_attr_ncq_prio_supported,
&dev_attr_ncq_prio_enable,
static const struct attribute_group ahci_shost_attr_group = {
.attrs = ahci_shost_attrs
};
const struct attribute_group *ahci_shost_groups[] = {
&ahci_shost_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
EXPORT_SYMBOL_GPL(ahci_shost_groups);
struct attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity.attr,
&dev_attr_unload_heads.attr,
&dev_attr_ncq_prio_supported.attr,
&dev_attr_ncq_prio_enable.attr,
NULL
};
static const struct attribute_group ahci_sdev_attr_group = {
.attrs = ahci_sdev_attrs
};
const struct attribute_group *ahci_sdev_groups[] = {
&ahci_sdev_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_groups);
struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,


@ -922,13 +922,22 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
struct device_attribute *ata_ncq_sdev_attrs[] = {
&dev_attr_unload_heads,
&dev_attr_ncq_prio_enable,
&dev_attr_ncq_prio_supported,
struct attribute *ata_ncq_sdev_attrs[] = {
&dev_attr_unload_heads.attr,
&dev_attr_ncq_prio_enable.attr,
&dev_attr_ncq_prio_supported.attr,
NULL
};
EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs);
static const struct attribute_group ata_ncq_sdev_attr_group = {
.attrs = ata_ncq_sdev_attrs
};
const struct attribute_group *ata_ncq_sdev_groups[] = {
&ata_ncq_sdev_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups);
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
@ -1258,7 +1267,7 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
else {
cmd->result = (DID_BAD_TARGET << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
return rc;
}


@ -234,11 +234,20 @@ static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
field, 0xff, 0);
}
struct device_attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads,
static struct attribute *ata_common_sdev_attrs[] = {
&dev_attr_unload_heads.attr,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
static const struct attribute_group ata_common_sdev_attr_group = {
.attrs = ata_common_sdev_attrs
};
const struct attribute_group *ata_common_sdev_groups[] = {
&ata_common_sdev_attr_group,
NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
@ -634,7 +643,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag);
if (qc) {
qc->scsicmd = cmd;
qc->scsidone = cmd->scsi_done;
qc->scsidone = scsi_done;
qc->sg = scsi_sglist(cmd);
qc->n_elem = scsi_sg_count(cmd);
@ -643,7 +652,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
qc->flags |= ATA_QCFLAG_QUIET;
} else {
cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
cmd->scsi_done(cmd);
scsi_done(cmd);
}
return qc;
@ -1738,14 +1747,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
early_finish:
ata_qc_free(qc);
cmd->scsi_done(cmd);
scsi_done(cmd);
DPRINTK("EXIT - early finish (good or error)\n");
return 0;
err_did:
ata_qc_free(qc);
cmd->result = (DID_ERROR << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
err_mem:
DPRINTK("EXIT - internal\n");
return 0;
@ -4042,7 +4051,7 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
scmd->cmd_len, scsi_op, dev->cdb_len);
scmd->result = DID_ERROR << 16;
scmd->scsi_done(scmd);
scsi_done(scmd);
return 0;
}
@ -4084,7 +4093,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
rc = __ata_scsi_queuecmd(cmd, dev);
else {
cmd->result = (DID_BAD_TARGET << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
spin_unlock_irqrestore(ap->lock, irq_flags);
@ -4218,7 +4227,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
}
cmd->scsi_done(cmd);
scsi_done(cmd);
}
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)


@ -923,7 +923,7 @@ static struct scsi_host_template pata_macio_sht = {
*/
.max_segment_size = MAX_DBDMA_SEG,
.slave_configure = pata_macio_slave_config,
.sdev_attrs = ata_common_sdev_attrs,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};


@ -670,7 +670,7 @@ static struct scsi_host_template mv6_sht = {
.can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_attrs = ata_ncq_sdev_attrs,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.slave_configure = ata_scsi_slave_config


@ -380,7 +380,7 @@ static struct scsi_host_template nv_adma_sht = {
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
.slave_configure = nv_adma_slave_config,
.sdev_attrs = ata_ncq_sdev_attrs,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
@ -391,7 +391,7 @@ static struct scsi_host_template nv_swncq_sht = {
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = nv_swncq_slave_config,
.sdev_attrs = ata_ncq_sdev_attrs,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
};


@ -379,7 +379,7 @@ static struct scsi_host_template sil24_sht = {
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_attrs = ata_ncq_sdev_attrs,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
.slave_configure = ata_scsi_slave_config
};


@ -1375,7 +1375,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
sbp2_unmap_scatterlist(device->card->device, orb);
orb->cmd->result = result;
orb->cmd->scsi_done(orb->cmd);
scsi_done(orb->cmd);
}
static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
@ -1578,11 +1578,13 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
&dev_attr_ieee1394_id,
static struct attribute *sbp2_scsi_sysfs_attrs[] = {
&dev_attr_ieee1394_id.attr,
NULL
};
ATTRIBUTE_GROUPS(sbp2_scsi_sysfs);
static struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
@ -1595,7 +1597,7 @@ static struct scsi_host_template scsi_driver_template = {
.sg_tablesize = SG_ALL,
.max_segment_size = SBP2_MAX_SEG_SIZE,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
.sdev_groups = sbp2_scsi_sysfs_groups,
};
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");


@ -1026,10 +1026,17 @@ static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
*/
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
struct device_attribute **attr;
const struct attribute_group **g;
struct attribute **attr;
for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
device_remove_file(&shost->shost_dev, *attr);
for (g = shost->hostt->shost_groups; *g; ++g) {
for (attr = (*g)->attrs; *attr; ++attr) {
struct device_attribute *dev_attr =
container_of(*attr, typeof(*dev_attr), attr);
device_remove_file(&shost->shost_dev, dev_attr);
}
}
}
static void srp_remove_target(struct srp_target_port *target)
@ -1266,7 +1273,7 @@ static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
if (scmnd) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = result;
scmnd->scsi_done(scmnd);
scsi_done(scmnd);
}
}
@ -1987,7 +1994,7 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
srp_free_req(ch, req, scmnd,
be32_to_cpu(rsp->req_lim_delta));
scmnd->scsi_done(scmnd);
scsi_done(scmnd);
}
}
@ -2239,7 +2246,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
err:
if (scmnd->result) {
scmnd->scsi_done(scmnd);
scsi_done(scmnd);
ret = 0;
} else {
ret = SCSI_MLQUEUE_HOST_BUSY;
@ -2811,7 +2818,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
if (ret == SUCCESS) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = DID_ABORT << 16;
scmnd->scsi_done(scmnd);
scsi_done(scmnd);
}
return ret;
@ -3050,26 +3057,28 @@ static ssize_t allow_ext_sg_show(struct device *dev,
static DEVICE_ATTR_RO(allow_ext_sg);
static struct device_attribute *srp_host_attrs[] = {
&dev_attr_id_ext,
&dev_attr_ioc_guid,
&dev_attr_service_id,
&dev_attr_pkey,
&dev_attr_sgid,
&dev_attr_dgid,
&dev_attr_orig_dgid,
&dev_attr_req_lim,
&dev_attr_zero_req_lim,
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,
&dev_attr_ch_count,
&dev_attr_comp_vector,
&dev_attr_tl_retry_count,
&dev_attr_cmd_sg_entries,
&dev_attr_allow_ext_sg,
static struct attribute *srp_host_attrs[] = {
&dev_attr_id_ext.attr,
&dev_attr_ioc_guid.attr,
&dev_attr_service_id.attr,
&dev_attr_pkey.attr,
&dev_attr_sgid.attr,
&dev_attr_dgid.attr,
&dev_attr_orig_dgid.attr,
&dev_attr_req_lim.attr,
&dev_attr_zero_req_lim.attr,
&dev_attr_local_ib_port.attr,
&dev_attr_local_ib_device.attr,
&dev_attr_ch_count.attr,
&dev_attr_comp_vector.attr,
&dev_attr_tl_retry_count.attr,
&dev_attr_cmd_sg_entries.attr,
&dev_attr_allow_ext_sg.attr,
NULL
};
ATTRIBUTE_GROUPS(srp_host);
static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
@ -3090,7 +3099,7 @@ static struct scsi_host_template srp_template = {
.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.shost_attrs = srp_host_attrs,
.shost_groups = srp_host_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct srp_request),
};


@ -3705,47 +3705,17 @@ static struct configfs_attribute *srpt_da_attrs[] = {
NULL,
};
static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
static int srpt_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
return sysfs_emit(page, "%d\n", sport->enabled);
}
static ssize_t srpt_tpg_enable_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
unsigned long tmp;
int ret;
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
return -EINVAL;
}
mutex_lock(&sport->mutex);
srpt_set_enabled(sport, tmp);
srpt_set_enabled(sport, enable);
mutex_unlock(&sport->mutex);
return count;
return 0;
}
CONFIGFS_ATTR(srpt_tpg_, enable);
static struct configfs_attribute *srpt_tpg_attrs[] = {
&srpt_tpg_attr_enable,
NULL,
};
/**
* srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* @wwn: Corresponds to $driver/$port.
@ -3856,12 +3826,12 @@ static const struct target_core_fabric_ops srpt_template = {
.fabric_make_wwn = srpt_make_tport,
.fabric_drop_wwn = srpt_drop_tport,
.fabric_make_tpg = srpt_make_tpg,
.fabric_enable_tpg = srpt_enable_tpg,
.fabric_drop_tpg = srpt_drop_tpg,
.fabric_init_nodeacl = srpt_init_nodeacl,
.tfc_discovery_attrs = srpt_da_attrs,
.tfc_wwn_attrs = srpt_wwn_attrs,
.tfc_tpg_base_attrs = srpt_tpg_attrs,
.tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
};


@ -129,7 +129,7 @@ static struct scsi_host_template mptfc_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.shost_attrs = mptscsih_host_attrs,
.shost_groups = mptscsih_host_attr_groups,
};
/****************************************************************************
@ -649,14 +649,14 @@ mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
return 0;
}
err = fc_remote_port_chkready(rport);
if (unlikely(err)) {
SCpnt->result = err;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
return 0;
}
@ -664,7 +664,7 @@ mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
ri = *((struct mptfc_rport_info **)rport->dd_data);
if (unlikely(!ri)) {
SCpnt->result = DID_IMM_RETRY << 16;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
return 0;
}


@ -1929,7 +1929,7 @@ mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
return 0;
}
@ -2020,7 +2020,7 @@ static struct scsi_host_template mptsas_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.shost_attrs = mptscsih_host_attrs,
.shost_groups = mptscsih_host_attr_groups,
.no_write_same = 1,
};


@ -1009,7 +1009,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
/* Unmap the DMA buffers, if any. */
scsi_dma_unmap(sc);
sc->scsi_done(sc); /* Issue the command callback */
scsi_done(sc); /* Issue the command callback */
/* Free Chain buffers */
mptscsih_freeChainBuffers(ioc, req_idx);
@ -1054,7 +1054,7 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
"completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
"idx=%x\n", ioc->name, channel, id, sc, mf, ii));
sc->scsi_done(sc);
scsi_done(sc);
}
}
EXPORT_SYMBOL(mptscsih_flush_running_cmds);
@ -1118,7 +1118,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
"fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
vdevice->vtarget->channel, vdevice->vtarget->id,
sc, mf, ii));
sc->scsi_done(sc);
scsi_done(sc);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
}
}
@ -1693,7 +1693,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
*/
if ((hd = shost_priv(SCpnt->device->host)) == NULL) {
SCpnt->result = DID_RESET << 16;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
printk(KERN_ERR MYNAM ": task abort: "
"can't locate host! (sc=%p)\n", SCpnt);
return FAILED;
@ -1710,7 +1710,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
"task abort: device has been deleted (sc=%p)\n",
ioc->name, SCpnt));
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
retval = SUCCESS;
goto out;
}
@ -3218,23 +3218,31 @@ mptscsih_debug_level_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR,
mptscsih_debug_level_show, mptscsih_debug_level_store);
struct device_attribute *mptscsih_host_attrs[] = {
&dev_attr_version_fw,
&dev_attr_version_bios,
&dev_attr_version_mpi,
&dev_attr_version_product,
&dev_attr_version_nvdata_persistent,
&dev_attr_version_nvdata_default,
&dev_attr_board_name,
&dev_attr_board_assembly,
&dev_attr_board_tracer,
&dev_attr_io_delay,
&dev_attr_device_delay,
&dev_attr_debug_level,
static struct attribute *mptscsih_host_attrs[] = {
&dev_attr_version_fw.attr,
&dev_attr_version_bios.attr,
&dev_attr_version_mpi.attr,
&dev_attr_version_product.attr,
&dev_attr_version_nvdata_persistent.attr,
&dev_attr_version_nvdata_default.attr,
&dev_attr_board_name.attr,
&dev_attr_board_assembly.attr,
&dev_attr_board_tracer.attr,
&dev_attr_io_delay.attr,
&dev_attr_device_delay.attr,
&dev_attr_debug_level.attr,
NULL,
};
EXPORT_SYMBOL(mptscsih_host_attrs);
static const struct attribute_group mptscsih_host_attr_group = {
.attrs = mptscsih_host_attrs
};
const struct attribute_group *mptscsih_host_attr_groups[] = {
&mptscsih_host_attr_group,
NULL
};
EXPORT_SYMBOL(mptscsih_host_attr_groups);
EXPORT_SYMBOL(mptscsih_remove);
EXPORT_SYMBOL(mptscsih_shutdown);


@ -131,7 +131,7 @@ extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern struct device_attribute *mptscsih_host_attrs[];
extern const struct attribute_group *mptscsih_host_attr_groups[];
extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
extern void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd);


@ -782,14 +782,14 @@ mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
if (!vdevice || !vdevice->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
return 0;
}
if (SCpnt->device->channel == 1 &&
mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) {
SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
return 0;
}
@ -843,7 +843,7 @@ static struct scsi_host_template mptspi_driver_template = {
.sg_tablesize = MPT_SCSI_SG_DEPTH,
.max_sectors = 8192,
.cmd_per_lun = 7,
.shost_attrs = mptscsih_host_attrs,
.shost_groups = mptscsih_host_attr_groups,
};
static int mptspi_write_spi_device_pg1(struct scsi_target *starget,


@ -184,8 +184,8 @@ extern const struct attribute_group *zfcp_sysfs_adapter_attr_groups[];
extern const struct attribute_group *zfcp_unit_attr_groups[];
extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
extern const struct attribute_group *zfcp_sysfs_sdev_attr_groups[];
extern const struct attribute_group *zfcp_sysfs_shost_attr_groups[];
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */


@ -2501,7 +2501,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
zfcp_dbf_scsi_result(scpnt, req);
scpnt->host_scribble = NULL;
(scpnt->scsi_done) (scpnt);
scsi_done(scpnt);
/*
* We must hold this lock until scsi_done has been called.
* Otherwise we may call scsi_done after abort regarding this


@ -60,7 +60,7 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
set_host_byte(scpnt, result);
zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
scsi_done(scpnt);
}
static
@ -78,7 +78,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
zfcp_dbf_scsi_fail_send(scpnt);
scpnt->scsi_done(scpnt);
scsi_done(scpnt);
return 0;
}
@ -444,8 +444,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
/* report size limit per scatter-gather segment */
.max_segment_size = ZFCP_QDIO_SBALE_LEN,
.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs = zfcp_sysfs_shost_attrs,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
.shost_groups = zfcp_sysfs_shost_attr_groups,
.sdev_groups = zfcp_sysfs_sdev_attr_groups,
.track_queue_depth = 1,
.supported_mode = MODE_INITIATOR,
};


@ -672,17 +672,26 @@ ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n",
ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n",
atomic_read(&zfcp_sdev->status));
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
&dev_attr_wwpn,
&dev_attr_hba_id,
&dev_attr_read_latency,
&dev_attr_write_latency,
&dev_attr_cmd_latency,
&dev_attr_zfcp_access_denied,
&dev_attr_zfcp_failed,
&dev_attr_zfcp_in_recovery,
&dev_attr_zfcp_status,
struct attribute *zfcp_sdev_attrs[] = {
&dev_attr_fcp_lun.attr,
&dev_attr_wwpn.attr,
&dev_attr_hba_id.attr,
&dev_attr_read_latency.attr,
&dev_attr_write_latency.attr,
&dev_attr_cmd_latency.attr,
&dev_attr_zfcp_access_denied.attr,
&dev_attr_zfcp_failed.attr,
&dev_attr_zfcp_in_recovery.attr,
&dev_attr_zfcp_status.attr,
NULL
};
static const struct attribute_group zfcp_sysfs_sdev_attr_group = {
.attrs = zfcp_sdev_attrs
};
const struct attribute_group *zfcp_sysfs_sdev_attr_groups[] = {
&zfcp_sysfs_sdev_attr_group,
NULL
};
@ -783,12 +792,21 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
}
static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization,
&dev_attr_requests,
&dev_attr_megabytes,
&dev_attr_seconds_active,
&dev_attr_queue_full,
static struct attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization.attr,
&dev_attr_requests.attr,
&dev_attr_megabytes.attr,
&dev_attr_seconds_active.attr,
&dev_attr_queue_full.attr,
NULL
};
static const struct attribute_group zfcp_sysfs_shost_attr_group = {
.attrs = zfcp_sysfs_shost_attrs
};
const struct attribute_group *zfcp_sysfs_shost_attr_groups[] = {
&zfcp_sysfs_shost_attr_group,
NULL
};


@ -197,11 +197,13 @@ static struct device_attribute twa_host_stats_attr = {
};
/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
&twa_host_stats_attr,
static struct attribute *twa_host_attrs[] = {
&twa_host_stats_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(twa_host);
/* File operations struct for character device */
static const struct file_operations twa_fops = {
.owner = THIS_MODULE,
@ -1352,7 +1354,7 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
/* Now complete the io */
if (twa_command_mapped(cmd))
scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
@ -1596,7 +1598,7 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
cmd->result = (DID_RESET << 16);
if (twa_command_mapped(cmd))
scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
}
}
@ -1744,8 +1746,9 @@ static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
} /* End twa_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@ -1763,9 +1766,6 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
goto out;
}
/* Save done function into scsi_cmnd struct */
SCpnt->scsi_done = done;
/* Get a free request id */
twa_get_request_id(tw_dev, &request_id);
@ -1990,7 +1990,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
.shost_attrs = twa_host_attrs,
.shost_groups = twa_host_groups,
.emulated = 1,
.no_write_same = 1,
};


@ -198,11 +198,13 @@ static struct device_attribute twl_host_stats_attr = {
};
/* Host attributes initializer */
static struct device_attribute *twl_host_attrs[] = {
&twl_host_stats_attr,
static struct attribute *twl_host_attrs[] = {
&twl_host_stats_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(twl_host);
/* This function will look up an AEN severity string */
static char *twl_aen_severity_lookup(unsigned char severity_code)
{
@ -1216,7 +1218,7 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
/* Now complete the io */
scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twl_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
@ -1369,7 +1371,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
if (cmd) {
cmd->result = (DID_RESET << 16);
scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
}
}
@ -1450,8 +1452,9 @@ static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
} /* End twl_scsi_eh_reset() */
/* This is the main scsi queue function to handle scsi opcodes */
static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
int request_id, retval;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
@ -1461,9 +1464,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
goto out;
}
/* Save done function into scsi_cmnd struct */
SCpnt->scsi_done = done;
/* Get a free request id */
twl_get_request_id(tw_dev, &request_id);
@ -1544,7 +1544,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
.shost_attrs = twl_host_attrs,
.shost_groups = twl_host_groups,
.emulated = 1,
.no_write_same = 1,
};


@ -532,11 +532,13 @@ static struct device_attribute tw_host_stats_attr = {
};
/* Host attributes initializer */
static struct device_attribute *tw_host_attrs[] = {
&tw_host_stats_attr,
static struct attribute *tw_host_attrs[] = {
&tw_host_stats_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(tw_host);
/* This function will read the aen queue from the isr */
static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
@ -1160,7 +1162,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->srb[request_id]->result = (DID_OK << 16);
tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
scsi_done(tw_dev->srb[request_id]);
}
command_packet->byte8.param.sgl[0].address = param_value;
command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
@ -1305,7 +1307,7 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
if (srb != NULL) {
srb->result = (DID_RESET << 16);
scsi_dma_unmap(srb);
srb->scsi_done(srb);
scsi_done(srb);
}
}
}
@ -1505,7 +1507,7 @@ static int tw_scsiop_mode_sense(TW_Device_Extension *tw_dev, int request_id)
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->srb[request_id]->result = (DID_OK << 16);
tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
scsi_done(tw_dev->srb[request_id]);
return 0;
}
@ -1796,7 +1798,7 @@ static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id)
/* If we got a request_sense, we probably want a reset, return error */
tw_dev->srb[request_id]->result = (DID_ERROR << 16);
tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
scsi_done(tw_dev->srb[request_id]);
return 0;
} /* End tw_scsiop_request_sense() */
@ -1918,8 +1920,9 @@ static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int r
} /* End tw_scsiop_test_unit_ready_complete() */
/* This is the main scsi queue function to handle scsi opcodes */
static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
unsigned char *command = SCpnt->cmnd;
int request_id = 0;
int retval = 1;
@ -1929,9 +1932,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
if (test_bit(TW_IN_RESET, &tw_dev->flags))
return SCSI_MLQUEUE_HOST_BUSY;
/* Save done function into struct scsi_cmnd */
SCpnt->scsi_done = done;
/* Queue the command and get a request id */
tw_state_request_start(tw_dev, &request_id);
@ -2165,7 +2165,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
/* Now complete the io */
if ((error != TW_ISR_DONT_COMPLETE)) {
scsi_dma_unmap(tw_dev->srb[request_id]);
tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
scsi_done(tw_dev->srb[request_id]);
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->posted_request_count--;
@ -2242,7 +2242,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
.shost_attrs = tw_host_attrs,
.shost_groups = tw_host_groups,
.emulated = 1,
.no_write_same = 1,
};
@ -2252,7 +2252,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
int retval = -ENODEV;
int retval;
retval = pci_enable_device(pdev);
if (retval) {


@ -163,7 +163,7 @@ STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
STATIC struct device_attribute *NCR_700_dev_attrs[];
STATIC const struct attribute_group *NCR_700_dev_groups[];
STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
@ -300,8 +300,8 @@ NCR_700_detect(struct scsi_host_template *tpnt,
static int banner = 0;
int j;
if(tpnt->sdev_attrs == NULL)
tpnt->sdev_attrs = NCR_700_dev_attrs;
if (tpnt->sdev_groups == NULL)
tpnt->sdev_groups = NCR_700_dev_groups;
memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
if (!memory) {
@ -634,7 +634,7 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
SCp->host_scribble = NULL;
SCp->result = result;
SCp->scsi_done(SCp);
scsi_done(SCp);
} else {
printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
}
@ -1571,7 +1571,7 @@ NCR_700_intr(int irq, void *dev_id)
* deadlock on the
* hostdata->state_lock */
SCp->result = DID_RESET << 16;
SCp->scsi_done(SCp);
scsi_done(SCp);
}
mdelay(25);
NCR_700_chip_setup(host);
@ -1751,8 +1751,7 @@ NCR_700_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
static int
NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
@ -1792,7 +1791,6 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
slot->cmnd = SCp;
SCp->scsi_done = done;
SCp->host_scribble = (unsigned char *)slot;
SCp->SCp.ptr = NULL;
SCp->SCp.buffer = NULL;
@ -2087,11 +2085,13 @@ static struct device_attribute NCR_700_active_tags_attr = {
.show = NCR_700_show_active_tags,
};
STATIC struct device_attribute *NCR_700_dev_attrs[] = {
&NCR_700_active_tags_attr,
STATIC struct attribute *NCR_700_dev_attrs[] = {
&NCR_700_active_tags_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(NCR_700_dev);
EXPORT_SYMBOL(NCR_700_detect);
EXPORT_SYMBOL(NCR_700_release);
EXPORT_SYMBOL(NCR_700_intr);


@ -2624,7 +2624,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
command->reset_chain;
command->reset_chain = NULL;
command->result = DID_RESET << 16;
command->scsi_done(command);
scsi_done(command);
command = nxt_cmd;
}
#endif
@ -2641,7 +2641,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
blogic_dealloc_ccb(ccb, 1);
adapter->active_cmds[tgt_id]--;
command->result = DID_RESET << 16;
command->scsi_done(command);
scsi_done(command);
}
adapter->bdr_pend[tgt_id] = NULL;
} else {
@ -2713,7 +2713,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/*
Call the SCSI Command Completion Routine.
*/
command->scsi_done(command);
scsi_done(command);
}
}
adapter->processing_ccbs = false;
@ -2866,9 +2866,9 @@ static int blogic_hostreset(struct scsi_cmnd *SCpnt)
Outgoing Mailbox for execution by the associated Host Adapter.
*/
static int blogic_qcmd_lck(struct scsi_cmnd *command,
void (*comp_cb) (struct scsi_cmnd *))
static int blogic_qcmd_lck(struct scsi_cmnd *command)
{
void (*comp_cb)(struct scsi_cmnd *) = scsi_done;
struct blogic_adapter *adapter =
(struct blogic_adapter *) command->device->host->hostdata;
struct blogic_tgt_flags *tgt_flags =
@ -3038,7 +3038,6 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
return SCSI_MLQUEUE_HOST_BUSY;
}
ccb->sensedata = sense_buf;
command->scsi_done = comp_cb;
if (blogic_multimaster_type(adapter)) {
/*
Place the CCB in an Outgoing Mailbox. The higher levels
@ -3060,7 +3059,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter);
blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16;
command->scsi_done(command);
scsi_done(command);
}
}
} else {


@ -547,7 +547,7 @@ static void complete_cmd(struct Scsi_Host *instance,
hostdata->sensing = NULL;
}
cmd->scsi_done(cmd);
scsi_done(cmd);
}
/**
@ -573,7 +573,7 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
case WRITE_10:
shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n");
cmd->result = (DID_ERROR << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
@ -960,7 +960,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
* hostdata->connected will be set to cmd.
* SELECT interrupt will be disabled.
*
* If failed (no target) : cmd->scsi_done() will be called, and the
* If failed (no target) : scsi_done() will be called, and the
* cmd->result host byte set to DID_BAD_TARGET.
*/
@ -2262,7 +2262,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
scsi_done(cmd); /* No tag or busy flag to worry about */
goto out;
}
@ -2357,7 +2357,7 @@ static void bus_reset_cleanup(struct Scsi_Host *instance)
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
INIT_LIST_HEAD(&hostdata->autosense);
@ -2400,7 +2400,7 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
scmd->result = DID_RESET << 16;
scmd->scsi_done(scmd);
scsi_done(scmd);
}
INIT_LIST_HEAD(&hostdata->unissued);


@ -911,13 +911,12 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
* queue the command down to the controller
*/
static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
static int inia100_queue_lck(struct scsi_cmnd *cmd)
{
struct orc_scb *scb;
struct orc_host *host; /* Point to Host adapter control block */
host = (struct orc_host *) cmd->device->host->hostdata;
cmd->scsi_done = done;
/* Get free SCSI control block */
if ((scb = orc_alloc_scb(host)) == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
@ -1042,7 +1041,7 @@ static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
}
cmd->result = scb->tastat | (scb->hastat << 16);
scsi_dma_unmap(cmd);
cmd->scsi_done(cmd); /* Notify system DONE */
scsi_done(cmd); /* Notify system DONE */
orc_release_scb(host, scb); /* Release SCB for current channel */
}


@ -223,6 +223,7 @@ static long aac_build_sghba(struct scsi_cmnd *scsicmd,
int sg_max, u64 sg_address);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
int pages, int nseg, int nseg_new);
static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
@ -332,7 +333,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
struct fib *fibptr) {
struct scsi_device *device;
if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
if (unlikely(!scsicmd)) {
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
aac_fib_complete(fibptr);
return 0;
@ -517,6 +518,17 @@ int aac_get_containers(struct aac_dev *dev)
return status;
}
static void aac_scsi_done(struct scsi_cmnd *scmd)
{
if (scmd->device->request_queue) {
/* SCSI command has been submitted by the SCSI mid-layer. */
scsi_done(scmd);
} else {
/* SCSI command has been submitted by aac_probe_container(). */
aac_probe_container_scsi_done(scmd);
}
}
static void get_container_name_callback(void *context, struct fib * fibptr)
{
struct aac_get_name_resp * get_name_reply;
@ -558,7 +570,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
}
/*
@ -614,7 +626,7 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
return aac_scsi_cmd(scsicmd);
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
return 0;
}
@ -804,8 +816,8 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
int aac_probe_container(struct aac_dev *dev, int cid)
{
struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
int status;
if (!scsicmd || !scsidev) {
@ -813,7 +825,6 @@ int aac_probe_container(struct aac_dev *dev, int cid)
kfree(scsidev);
return -ENOMEM;
}
scsicmd->scsi_done = aac_probe_container_scsi_done;
scsicmd->device = scsidev;
scsidev->sdev_state = 0;
@ -1094,7 +1105,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
}
/*
@ -1197,7 +1208,7 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
cmd->scsi_done(cmd);
aac_scsi_done(cmd);
return 1;
}
return 0;
@ -2392,7 +2403,7 @@ static void io_callback(void *context, struct fib * fibptr)
}
aac_fib_complete(fibptr);
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
}
static int aac_read(struct scsi_cmnd * scsicmd)
@ -2463,7 +2474,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
return 0;
}
@ -2489,7 +2500,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
return 0;
@ -2554,7 +2565,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
return 0;
}
@ -2580,7 +2591,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
@ -2621,7 +2632,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
cmd->scsi_done(cmd);
aac_scsi_done(cmd);
}
static int aac_synchronize(struct scsi_cmnd *scsicmd)
@ -2688,7 +2699,7 @@ static void aac_start_stop_callback(void *context, struct fib *fibptr)
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
}
static int aac_start_stop(struct scsi_cmnd *scsicmd)
@ -2702,7 +2713,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
if (!(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)) {
scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
return 0;
}
@ -3237,7 +3248,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_done_ret:
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
return 0;
}
@ -3546,7 +3557,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
aac_fib_complete(fibptr);
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
}
static void hba_resp_task_complete(struct aac_dev *dev,
@ -3686,7 +3697,7 @@ void aac_hba_callback(void *context, struct fib *fibptr)
if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
scsicmd->SCp.sent_command = 1;
else
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
}
/**
@ -3706,7 +3717,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > 7) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
return 0;
}
@ -3747,7 +3758,7 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > AAC_MAX_LUN - 1) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
aac_scsi_done(scsicmd);
return 0;
}


@ -605,12 +605,14 @@ static struct device_attribute aac_unique_id_attr = {
static struct device_attribute *aac_dev_attrs[] = {
&aac_raid_level_attr,
&aac_unique_id_attr,
static struct attribute *aac_dev_attrs[] = {
&aac_raid_level_attr.attr,
&aac_unique_id_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(aac_dev);
static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
@ -1442,21 +1444,23 @@ static struct device_attribute aac_reset = {
.show = aac_show_reset_adapter,
};
static struct device_attribute *aac_attrs[] = {
&aac_model,
&aac_vendor,
&aac_flags,
&aac_kernel_version,
&aac_monitor_version,
&aac_bios_version,
&aac_lld_version,
&aac_serial_number,
&aac_max_channel,
&aac_max_id,
&aac_reset,
static struct attribute *aac_host_attrs[] = {
&aac_model.attr,
&aac_vendor.attr,
&aac_flags.attr,
&aac_kernel_version.attr,
&aac_monitor_version.attr,
&aac_bios_version.attr,
&aac_lld_version.attr,
&aac_serial_number.attr,
&aac_max_channel.attr,
&aac_max_id.attr,
&aac_reset.attr,
NULL
};
ATTRIBUTE_GROUPS(aac_host);
ssize_t aac_get_serial_number(struct device *device, char *buf)
{
return aac_show_serial_number(device, &aac_serial_number, buf);
@ -1483,10 +1487,10 @@ static struct scsi_host_template aac_driver_template = {
#endif
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
.shost_attrs = aac_attrs,
.shost_groups = aac_host_groups,
.slave_configure = aac_slave_configure,
.change_queue_depth = aac_change_queue_depth,
.sdev_attrs = aac_dev_attrs,
.sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
.eh_device_reset_handler = aac_eh_dev_reset,
.eh_target_reset_handler = aac_eh_target_reset,


@ -3308,8 +3308,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
" iop_base 0x%lx, cable_detect: %X, err_code %u\n",
(unsigned long)v->iop_base,
" iop_base 0x%p, cable_detect: %X, err_code %u\n",
v->iop_base,
AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT,
v->err_code);
@ -3592,7 +3592,7 @@ static void asc_scsi_done(struct scsi_cmnd *scp)
{
scsi_dma_unmap(scp);
ASC_STATS(scp->device->host, done);
scp->scsi_done(scp);
scsi_done(scp);
}
static void AscSetBank(PortAddr iop_base, uchar bank)
@ -7477,8 +7477,8 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
return ASC_ERROR;
}
asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) +
use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC);
asc_sg_head = kzalloc(struct_size(asc_sg_head, sg_list, use_sg),
GFP_ATOMIC);
if (!asc_sg_head) {
scsi_dma_unmap(scp);
set_host_byte(scp, DID_SOFT_ERROR);
@ -8453,14 +8453,12 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
* This function always returns 0. Command return status is saved
* in the 'scp' result field.
*/
static int
advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
static int advansys_queuecommand_lck(struct scsi_cmnd *scp)
{
struct Scsi_Host *shost = scp->device->host;
int asc_res, result = 0;
ASC_STATS(shost, queuecommand);
scp->scsi_done = done;
asc_res = asc_execute_scsi_cmnd(scp);


@ -905,13 +905,11 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
* Queue a command and setup interrupts for a free bus.
*/
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
struct completion *complete,
int phase, void (*done)(struct scsi_cmnd *))
struct completion *complete, int phase)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
SCpnt->scsi_done = done;
SCpnt->SCp.phase = not_issued | phase;
SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
SCpnt->SCp.Message = 0;
@ -977,10 +975,9 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
* queue a command
*
*/
static int aha152x_queue_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
static int aha152x_queue_lck(struct scsi_cmnd *SCpnt)
{
return aha152x_internal_queue(SCpnt, NULL, 0, done);
return aha152x_internal_queue(SCpnt, NULL, 0);
}
static DEF_SCSI_QCMD(aha152x_queue)
@ -998,6 +995,14 @@ static void reset_done(struct scsi_cmnd *SCpnt)
}
}
static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
{
if (SCpnt->SCp.phase & resetting)
reset_done(SCpnt);
else
scsi_done(SCpnt);
}
/*
* Abort a command
*
@ -1064,7 +1069,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
SCpnt->cmd_len = 0;
aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
aha152x_internal_queue(SCpnt, &done, resetting);
timeleft = wait_for_completion_timeout(&done, 100*HZ);
if (!timeleft) {
@ -1439,12 +1444,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0);
DO_UNLOCK(flags);
aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
aha152x_internal_queue(ptr, NULL, check_condition);
DO_LOCK(flags);
}
}
if(DONE_SC && DONE_SC->scsi_done) {
if (DONE_SC) {
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@ -1453,13 +1458,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
if(ptr->scsi_done != reset_done) {
if (!(ptr->SCp.phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
DO_UNLOCK(flags);
ptr->scsi_done(ptr);
aha152x_scsi_done(ptr);
DO_LOCK(flags);
}
@ -2258,7 +2263,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
ptr->host_scribble=NULL;
set_host_byte(ptr, DID_RESET);
ptr->scsi_done(ptr);
aha152x_scsi_done(ptr);
}
ptr = next;


@ -268,8 +268,7 @@ static void aha1542_free_cmd(struct scsi_cmnd *cmd)
struct bio_vec bv;
rq_for_each_segment(bv, rq, iter) {
memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
bv.bv_len);
memcpy_to_bvec(&bv, buf);
buf += bv.bv_len;
}
}
@ -281,7 +280,6 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
{
struct Scsi_Host *sh = dev_id;
struct aha1542_hostdata *aha1542 = shost_priv(sh);
void (*my_done)(struct scsi_cmnd *) = NULL;
int errstatus, mbi, mbo, mbistatus;
int number_serviced;
unsigned long flags;
@ -369,14 +367,13 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
tmp_cmd = aha1542->int_cmds[mbo];
if (!tmp_cmd || !tmp_cmd->scsi_done) {
if (!tmp_cmd) {
spin_unlock_irqrestore(sh->host_lock, flags);
shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n");
shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat,
ccb[mbo].hastat, ccb[mbo].idlun, mbo);
return IRQ_HANDLED;
}
my_done = tmp_cmd->scsi_done;
aha1542_free_cmd(tmp_cmd);
/*
* Fetch the sense data, and tuck it away, in the required slot. The
@ -410,7 +407,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as
* far as queuecommand is concerned
*/
my_done(tmp_cmd);
scsi_done(tmp_cmd);
number_serviced++;
};
}
@ -431,7 +428,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (*cmd->cmnd == REQUEST_SENSE) {
/* Don't do the command - we have the sense data already */
cmd->result = 0;
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
#ifdef DEBUG
@ -454,8 +451,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
struct bio_vec bv;
rq_for_each_segment(bv, rq, iter) {
memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
bv.bv_len);
memcpy_from_bvec(buf, &bv);
buf += bv.bv_len;
}
}
@ -488,7 +484,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
aha1542->aha1542_last_mbo_used = mbo;
#ifdef DEBUG
shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
shost_printk(KERN_DEBUG, sh, "Sending command (%d)...", mbo);
#endif
/* This gets trashed for some reason */


@ -315,9 +315,9 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt,
void (*done)(struct scsi_cmnd *))
static int aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
unchar target = scmd_id(SCpnt);


@ -572,8 +572,7 @@ ahd_linux_info(struct Scsi_Host *host)
/*
* Queue an SCB to the controller.
*/
static int
ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
static int ahd_linux_queue_lck(struct scsi_cmnd *cmd)
{
struct ahd_softc *ahd;
struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device);
@ -581,7 +580,6 @@ ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
cmd->scsi_done = scsi_done;
cmd->result = CAM_REQ_INPROG << 16;
rtn = ahd_linux_run_command(ahd, dev, cmd);
@ -2111,7 +2109,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
ahd_cmd_set_transaction_status(cmd, new_status);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
static void


@ -196,7 +196,7 @@ int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
/*
* XXX
* ahd_dmamap_sync is only used on buffers allocated with
* the pci_alloc_consistent() API. Although I'm not sure how
* the dma_alloc_coherent() API. Although I'm not sure how
* this works on architectures with a write buffer, Linux does
* not have an API to sync "coherent" memory. Perhaps we need
* to do an mb()?


@ -518,8 +518,7 @@ ahc_linux_info(struct Scsi_Host *host)
/*
* Queue an SCB to the controller.
*/
static int
ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
static int ahc_linux_queue_lck(struct scsi_cmnd *cmd)
{
struct ahc_softc *ahc;
struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device);
@ -530,7 +529,6 @@ ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd
ahc_lock(ahc, &flags);
if (ahc->platform_data->qfrozen == 0) {
cmd->scsi_done = scsi_done;
cmd->result = CAM_REQ_INPROG << 16;
rtn = ahc_linux_run_command(ahc, dev, cmd);
}
@ -1986,7 +1984,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd)
ahc_cmd_set_transaction_status(cmd, new_status);
}
cmd->scsi_done(cmd);
scsi_done(cmd);
}
static void


@ -209,7 +209,7 @@ int ahc_dmamap_unload(struct ahc_softc *, bus_dma_tag_t, bus_dmamap_t);
/*
* XXX
* ahc_dmamap_sync is only used on buffers allocated with
* the pci_alloc_consistent() API. Although I'm not sure how
* the dma_alloc_coherent() API. Although I'm not sure how
* this works on architectures with a write buffer, Linux does
* not have an API to sync "coherent" memory. Perhaps we need
* to do an mb()?


@ -1041,6 +1041,6 @@ extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
struct QBUFFER __iomem *);
extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *);
extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
extern struct device_attribute *arcmsr_host_attrs[];
extern const struct attribute_group *arcmsr_host_groups[];
extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);


@ -58,8 +58,6 @@
#include <scsi/scsi_transport.h>
#include "arcmsr.h"
struct device_attribute *arcmsr_host_attrs[];
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin,
@ -389,16 +387,25 @@ static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_q
static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
struct device_attribute *arcmsr_host_attrs[] = {
&dev_attr_host_driver_version,
&dev_attr_host_driver_posted_cmd,
&dev_attr_host_driver_reset,
&dev_attr_host_driver_abort,
&dev_attr_host_fw_model,
&dev_attr_host_fw_version,
&dev_attr_host_fw_request_len,
&dev_attr_host_fw_numbers_queue,
&dev_attr_host_fw_sdram_size,
&dev_attr_host_fw_hd_channels,
static struct attribute *arcmsr_host_attrs[] = {
&dev_attr_host_driver_version.attr,
&dev_attr_host_driver_posted_cmd.attr,
&dev_attr_host_driver_reset.attr,
&dev_attr_host_driver_abort.attr,
&dev_attr_host_fw_model.attr,
&dev_attr_host_fw_version.attr,
&dev_attr_host_fw_request_len.attr,
&dev_attr_host_fw_numbers_queue.attr,
&dev_attr_host_fw_sdram_size.attr,
&dev_attr_host_fw_hd_channels.attr,
NULL,
};
static const struct attribute_group arcmsr_host_attr_group = {
.attrs = arcmsr_host_attrs,
};
const struct attribute_group *arcmsr_host_groups[] = {
&arcmsr_host_attr_group,
NULL
};
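
The arcmsr conversion above shows the sysfs change that runs through the whole series: host templates now take .shost_groups (a NULL-terminated array of attribute_group pointers) instead of .shost_attrs (an array of device_attribute pointers), so the attributes are available when KOBJ_ADD fires. A condensed sketch of the pattern, using a hypothetical example_ prefix:

/* Illustrative sketch only; example_* names are not from the patch. */
#include <linux/device.h>
#include <linux/sysfs.h>
#include <scsi/scsi_host.h>

static ssize_t fw_version_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "1.0\n");
}
static DEVICE_ATTR_RO(fw_version);

static struct attribute *example_host_attrs[] = {
        &dev_attr_fw_version.attr,      /* note: the .attr member, not the device_attribute */
        NULL
};

static const struct attribute_group example_host_attr_group = {
        .attrs = example_host_attrs,
};

static const struct attribute_group *example_host_groups[] = {
        &example_host_attr_group,
        NULL
};

static struct scsi_host_template example_sht = {
        .shost_groups = example_host_groups,    /* was: .shost_attrs = ... */
};

Drivers that only carry plain attributes (beiscsi, bnx2fc, csio, cxlflash and the hisi_sas variants further down) use the ATTRIBUTE_GROUPS() macro instead, which generates the group and the NULL-terminated groups array from the attribute list automatically.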


@ -167,7 +167,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
.cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
.shost_attrs = arcmsr_host_attrs,
.shost_groups = arcmsr_host_groups,
.no_write_same = 1,
};
@ -1318,7 +1318,7 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
spin_lock_irqsave(&acb->ccblist_lock, flags);
list_add_tail(&ccb->list, &acb->ccb_free_list);
spin_unlock_irqrestore(&acb->ccblist_lock, flags);
pcmd->scsi_done(pcmd);
scsi_done(pcmd);
}
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
@ -1598,7 +1598,7 @@ static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_pci_unmap_dma(ccb);
ccb->pcmd->scsi_done(ccb->pcmd);
scsi_done(ccb->pcmd);
}
}
for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
@ -3192,7 +3192,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
if (cmd->device->lun) {
cmd->result = (DID_TIME_OUT << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
return;
}
inqdata[0] = TYPE_PROCESSOR;
@ -3216,23 +3216,22 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
sg = scsi_sglist(cmd);
kunmap_atomic(buffer - sg->offset);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
break;
case WRITE_BUFFER:
case READ_BUFFER: {
if (arcmsr_iop_message_xfer(acb, cmd))
cmd->result = (DID_ERROR << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
}
break;
default:
cmd->scsi_done(cmd);
scsi_done(cmd);
}
}
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
void (* done)(struct scsi_cmnd *))
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
@ -3241,10 +3240,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
cmd->result = (DID_NO_CONNECT << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
if (target == 16) {
@ -3257,7 +3255,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
return SCSI_MLQUEUE_HOST_BUSY;
if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
arcmsr_post_ccb(acb, ccb);


@ -841,13 +841,10 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
}
}
if (!SCpnt->scsi_done)
panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no);
clear_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x7), host->busyluns);
SCpnt->scsi_done(SCpnt);
scsi_done(SCpnt);
} else
printk("scsi%d: null command in acornscsi_done", host->host->host_no);
@ -2400,24 +2397,16 @@ acornscsi_intr(int irq, void *dev_id)
*/
/*
* Function : acornscsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
* Function : acornscsi_queuecmd(struct scsi_cmnd *cmd)
* Purpose : queues a SCSI command
* Params : cmd - SCSI command
* done - function called on completion, with pointer to command descriptor
* Returns : 0, or < 0 on error.
*/
static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
if (!done) {
/* there should be some way of rejecting errors like this without panicing... */
panic("scsi%d: queuecommand called with NULL done function [cmd=%p]",
host->host->host_no, SCpnt);
return -EINVAL;
}
#if (DEBUG & DEBUG_NO_WRITE)
if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
@ -2428,7 +2417,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
}
#endif
SCpnt->scsi_done = done;
SCpnt->host_scribble = NULL;
SCpnt->result = 0;
SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);


@ -243,6 +243,7 @@ static struct scsi_host_template arxescsi_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
.cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 0,
.this_id = 7,
.sg_tablesize = SG_ALL,


@ -363,6 +363,7 @@ static struct scsi_host_template cumanascsi2_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
.cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,


@ -480,6 +480,7 @@ static struct scsi_host_template eesox_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
.cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,


@ -2015,7 +2015,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* correctly by fas216_std_done.
*/
scsi_eh_restore_cmnd(SCpnt, &info->ses);
SCpnt->scsi_done(SCpnt);
fas216_cmd_priv(SCpnt)->scsi_done(SCpnt);
}
/**
@ -2086,8 +2086,8 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
}
done:
if (SCpnt->scsi_done) {
SCpnt->scsi_done(SCpnt);
if (fas216_cmd_priv(SCpnt)->scsi_done) {
fas216_cmd_priv(SCpnt)->scsi_done(SCpnt);
return;
}
@ -2184,7 +2184,7 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
}
/**
* fas216_queue_command - queue a command for adapter to process.
* fas216_queue_command_internal - queue a command for the adapter to process
* @SCpnt: Command to queue
* @done: done function to call once command is complete
*
@ -2192,8 +2192,8 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
* Returns: 0 on success, else error.
* Notes: io_request_lock is held, interrupts are disabled.
*/
static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
static int fas216_queue_command_internal(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
int result;
@ -2203,7 +2203,7 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
fas216_log_command(info, LOG_CONNECT, SCpnt,
"received command (%p)", SCpnt);
SCpnt->scsi_done = done;
fas216_cmd_priv(SCpnt)->scsi_done = done;
SCpnt->host_scribble = (void *)fas216_std_done;
SCpnt->result = 0;
@ -2233,6 +2233,11 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
return result;
}
static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt)
{
return fas216_queue_command_internal(SCpnt, scsi_done);
}
DEF_SCSI_QCMD(fas216_queue_command)
/**
@ -2258,8 +2263,7 @@ static void fas216_internal_done(struct scsi_cmnd *SCpnt)
* Returns: scsi result code.
* Notes: io_request_lock is held, interrupts are disabled.
*/
static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt)
{
FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata;
@ -2272,7 +2276,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
BUG_ON(info->scsi.irq);
info->internal_done = 0;
fas216_queue_command_lck(SCpnt, fas216_internal_done);
fas216_queue_command_internal(SCpnt, fas216_internal_done);
/*
* This wastes time, since we can't return until the command is
@ -2300,7 +2304,7 @@ static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt,
spin_lock_irq(info->host->host_lock);
done(SCpnt);
scsi_done(SCpnt);
return 0;
}


@ -310,6 +310,16 @@ typedef struct {
unsigned long magic_end;
} FAS216_Info;
/* driver-private data per SCSI command. */
struct fas216_cmd_priv {
void (*scsi_done)(struct scsi_cmnd *cmd);
};
static inline struct fas216_cmd_priv *fas216_cmd_priv(struct scsi_cmnd *cmd)
{
return scsi_cmd_priv(cmd);
}
/* Function: int fas216_init (struct Scsi_Host *instance)
* Purpose : initialise FAS/NCR/AMD SCSI structures.
* Params : instance - a driver-specific filled-out structure
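
fas216 keeps its completion pointer by moving it into per-command private data: the template's .cmd_size reserves space behind each scsi_cmnd, and scsi_cmd_priv() returns it. A minimal sketch of that pattern (example_* names are illustrative):

/* Illustrative sketch only; example_* names are not from the patch. */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct example_cmd_priv {
        void (*scsi_done)(struct scsi_cmnd *cmd);
};

static inline struct example_cmd_priv *example_cmd_priv(struct scsi_cmnd *cmd)
{
        return scsi_cmd_priv(cmd);      /* memory placed immediately after the scsi_cmnd */
}

static struct scsi_host_template example_template = {
        .cmd_size = sizeof(struct example_cmd_priv),    /* midlayer allocates the extra space */
};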


@ -286,7 +286,7 @@ static struct scsi_host_template powertecscsi_template = {
.eh_bus_reset_handler = fas216_eh_bus_reset,
.eh_device_reset_handler = fas216_eh_device_reset,
.eh_abort_handler = fas216_eh_abort,
.cmd_size = sizeof(struct fas216_cmd_priv),
.can_queue = 8,
.this_id = 7,
.sg_tablesize = SG_MAX_SEGMENTS,


@ -512,7 +512,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
scsi_dma_unmap(workreq);
spin_lock_irqsave(dev->host->host_lock, flags);
(*workreq->scsi_done) (workreq);
scsi_done(workreq);
#ifdef ED_DBGP
printk("workreq->scsi_done\n");
#endif
@ -618,9 +618,9 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
void (*done) (struct scsi_cmnd *))
static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
unsigned char c;
unsigned int m;
struct atp_unit *dev;
@ -654,17 +654,6 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
return 0;
}
if (done) {
req_p->scsi_done = done;
} else {
#ifdef ED_DBGP
printk( "atp870u_queuecommand: done can't be NULL\n");
#endif
req_p->result = 0;
done(req_p);
return 0;
}
/*
* Count new command
*/


@ -163,17 +163,20 @@ DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
beiscsi_free_session_disp, NULL);
static struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
&dev_attr_beiscsi_drvr_ver,
&dev_attr_beiscsi_adapter_family,
&dev_attr_beiscsi_fw_ver,
&dev_attr_beiscsi_active_session_count,
&dev_attr_beiscsi_free_session_count,
&dev_attr_beiscsi_phys_port,
static struct attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable.attr,
&dev_attr_beiscsi_drvr_ver.attr,
&dev_attr_beiscsi_adapter_family.attr,
&dev_attr_beiscsi_fw_ver.attr,
&dev_attr_beiscsi_active_session_count.attr,
&dev_attr_beiscsi_free_session_count.attr,
&dev_attr_beiscsi_phys_port.attr,
NULL,
};
ATTRIBUTE_GROUPS(beiscsi);
static char const *cqe_desc[] = {
"RESERVED_DESC",
"SOL_CMD_COMPLETE",
@ -391,7 +394,7 @@ static struct scsi_host_template beiscsi_sht = {
.eh_abort_handler = beiscsi_eh_abort,
.eh_device_reset_handler = beiscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_session_reset,
.shost_attrs = beiscsi_attrs,
.shost_groups = beiscsi_groups,
.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
.can_queue = BE2_IO_DEPTH,
.this_id = -1,


@ -956,36 +956,52 @@ static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
bfad_im_num_of_discovered_ports_show, NULL);
struct device_attribute *bfad_im_host_attrs[] = {
&dev_attr_serial_number,
&dev_attr_model,
&dev_attr_model_description,
&dev_attr_node_name,
&dev_attr_symbolic_name,
&dev_attr_hardware_version,
&dev_attr_driver_version,
&dev_attr_option_rom_version,
&dev_attr_firmware_version,
&dev_attr_number_of_ports,
&dev_attr_driver_name,
&dev_attr_number_of_discovered_ports,
static struct attribute *bfad_im_host_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_model.attr,
&dev_attr_model_description.attr,
&dev_attr_node_name.attr,
&dev_attr_symbolic_name.attr,
&dev_attr_hardware_version.attr,
&dev_attr_driver_version.attr,
&dev_attr_option_rom_version.attr,
&dev_attr_firmware_version.attr,
&dev_attr_number_of_ports.attr,
&dev_attr_driver_name.attr,
&dev_attr_number_of_discovered_ports.attr,
NULL,
};
struct device_attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number,
&dev_attr_model,
&dev_attr_model_description,
&dev_attr_node_name,
&dev_attr_symbolic_name,
&dev_attr_hardware_version,
&dev_attr_driver_version,
&dev_attr_option_rom_version,
&dev_attr_firmware_version,
&dev_attr_number_of_ports,
&dev_attr_driver_name,
&dev_attr_number_of_discovered_ports,
static const struct attribute_group bfad_im_host_attr_group = {
.attrs = bfad_im_host_attrs
};
const struct attribute_group *bfad_im_host_groups[] = {
&bfad_im_host_attr_group,
NULL
};
struct attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_model.attr,
&dev_attr_model_description.attr,
&dev_attr_node_name.attr,
&dev_attr_symbolic_name.attr,
&dev_attr_hardware_version.attr,
&dev_attr_driver_version.attr,
&dev_attr_option_rom_version.attr,
&dev_attr_firmware_version.attr,
&dev_attr_number_of_ports.attr,
&dev_attr_driver_name.attr,
&dev_attr_number_of_discovered_ports.attr,
NULL,
};
static const struct attribute_group bfad_im_vport_attr_group = {
.attrs = bfad_im_vport_attrs
};
const struct attribute_group *bfad_im_vport_groups[] = {
&bfad_im_vport_attr_group,
NULL
};


@ -96,7 +96,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
}
}
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
}
void
@ -124,7 +124,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
}
}
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
}
void
@ -226,7 +226,7 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
timeout *= 2;
}
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
bfa_trc(bfad, hal_io->iotag);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"scsi%d: complete abort 0x%p iotag 0x%x\n",
@ -809,7 +809,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
.shost_attrs = bfad_im_host_attrs,
.shost_groups = bfad_im_host_groups,
.max_sectors = BFAD_MAX_SECTORS,
.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
};
@ -831,7 +831,7 @@ struct scsi_host_template bfad_im_vport_template = {
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
.cmd_per_lun = 3,
.shost_attrs = bfad_im_vport_attrs,
.shost_groups = bfad_im_vport_groups,
.max_sectors = BFAD_MAX_SECTORS,
};
@ -1199,9 +1199,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
/*
* Scsi_Host template entry, queue a SCSI command to the BFAD.
*/
static int
bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
@ -1233,8 +1233,6 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
if (sg_cnt < 0)
return SCSI_MLQUEUE_HOST_BUSY;
cmnd->scsi_done = done;
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
printk(KERN_WARNING


@ -174,8 +174,8 @@ extern struct fc_function_template bfad_im_vport_fc_function_template;
extern struct scsi_transport_template *bfad_im_scsi_transport_template;
extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
extern struct device_attribute *bfad_im_host_attrs[];
extern struct device_attribute *bfad_im_vport_attrs[];
extern const struct attribute_group *bfad_im_host_groups[];
extern const struct attribute_group *bfad_im_vport_groups[];
irqreturn_t bfad_intx(int irq, void *dev_id);


@ -2951,11 +2951,13 @@ bnx2fc_tm_timeout_store(struct device *dev,
static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show,
bnx2fc_tm_timeout_store);
static struct device_attribute *bnx2fc_host_attrs[] = {
&dev_attr_tm_timeout,
static struct attribute *bnx2fc_host_attrs[] = {
&dev_attr_tm_timeout.attr,
NULL,
};
ATTRIBUTE_GROUPS(bnx2fc_host);
/*
* scsi_host_template structure used while registering with SCSI-ml
*/
@ -2977,7 +2979,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.max_sectors = 0x3fbf,
.track_queue_depth = 1,
.slave_configure = bnx2fc_slave_configure,
.shost_attrs = bnx2fc_host_attrs,
.shost_groups = bnx2fc_host_groups,
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {


@ -205,7 +205,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
sc_cmd->allowed);
scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
}
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
@ -1610,7 +1610,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
}
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
if (io_req->wait_for_abts_comp) {
@ -1853,7 +1853,7 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
return 0;
}
@ -2019,7 +2019,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
break;
}
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}


@ -795,7 +795,7 @@ extern struct cnic_ulp_ops bnx2i_cnic_cb;
extern unsigned int sq_size;
extern unsigned int rq_size;
extern struct device_attribute *bnx2i_dev_attributes[];
extern const struct attribute_group *bnx2i_dev_groups[];


@ -2266,7 +2266,7 @@ static struct scsi_host_template bnx2i_host_template = {
.cmd_per_lun = 128,
.this_id = -1,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
.shost_attrs = bnx2i_dev_attributes,
.shost_groups = bnx2i_dev_groups,
.track_queue_depth = 1,
};


@ -142,8 +142,17 @@ static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
bnx2i_show_ccell_info, bnx2i_set_ccell_info);
struct device_attribute *bnx2i_dev_attributes[] = {
&dev_attr_sq_size,
&dev_attr_num_ccell,
static struct attribute *bnx2i_dev_attributes[] = {
&dev_attr_sq_size.attr,
&dev_attr_num_ccell.attr,
NULL
};
static const struct attribute_group bnx2i_dev_attr_group = {
.attrs = bnx2i_dev_attributes
};
const struct attribute_group *bnx2i_dev_groups[] = {
&bnx2i_dev_attr_group,
NULL
};


@ -619,7 +619,7 @@ csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
struct fc_els_csp *csp;
struct fc_els_cssp *clsp;
enum fw_retval retval;
__be32 nport_id;
__be32 nport_id = 0;
retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
if (retval != FW_SUCCESS) {


@ -1460,14 +1460,16 @@ static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port);
static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
csio_store_dbg_level);
static struct device_attribute *csio_fcoe_lport_attrs[] = {
&dev_attr_hw_state,
&dev_attr_device_reset,
&dev_attr_disable_port,
&dev_attr_dbg_level,
static struct attribute *csio_fcoe_lport_attrs[] = {
&dev_attr_hw_state.attr,
&dev_attr_device_reset.attr,
&dev_attr_disable_port.attr,
&dev_attr_dbg_level.attr,
NULL,
};
ATTRIBUTE_GROUPS(csio_fcoe_lport);
static ssize_t
csio_show_num_reg_rnodes(struct device *dev,
struct device_attribute *attr, char *buf)
@ -1479,12 +1481,14 @@ csio_show_num_reg_rnodes(struct device *dev,
static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
static struct device_attribute *csio_fcoe_vport_attrs[] = {
&dev_attr_num_reg_rnodes,
&dev_attr_dbg_level,
static struct attribute *csio_fcoe_vport_attrs[] = {
&dev_attr_num_reg_rnodes.attr,
&dev_attr_dbg_level.attr,
NULL,
};
ATTRIBUTE_GROUPS(csio_fcoe_vport);
static inline uint32_t
csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
{
@ -1720,7 +1724,7 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
}
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
/* Wake up waiting threads */
csio_scsi_cmnd(req) = NULL;
@ -1748,7 +1752,7 @@ csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
}
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
csio_scsi_cmnd(req) = NULL;
CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
} else {
@ -1876,7 +1880,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
return rv;
err_done:
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
return 0;
}
@ -1979,7 +1983,7 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
spin_unlock_irq(&hw->lock);
cmnd->result = (DID_ERROR << 16);
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
return FAILED;
}
@ -2277,7 +2281,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
.shost_attrs = csio_fcoe_lport_attrs,
.shost_groups = csio_fcoe_lport_groups,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};
@ -2296,7 +2300,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
.cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
.shost_attrs = csio_fcoe_vport_attrs,
.shost_groups = csio_fcoe_vport_groups,
.max_sectors = CSIO_MAX_SECTOR_SIZE,
};


@ -171,7 +171,7 @@ static void cmd_complete(struct afu_cmd *cmd)
dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
__func__, scp, scp->result, cmd->sa.ioasc);
scp->scsi_done(scp);
scsi_done(scp);
} else if (cmd->cmd_tmf) {
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
cfg->tmf_active = false;
@ -205,7 +205,7 @@ static void flush_pending_cmds(struct hwq *hwq)
if (cmd->scp) {
scp = cmd->scp;
scp->result = (DID_IMM_RETRY << 16);
scp->scsi_done(scp);
scsi_done(scp);
} else {
cmd->cmd_aborted = true;
@ -601,7 +601,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
case STATE_FAILTERM:
dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
scp->result = (DID_NO_CONNECT << 16);
scp->scsi_done(scp);
scsi_done(scp);
rc = 0;
goto out;
default:
@ -3103,33 +3103,37 @@ static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);
static struct device_attribute *cxlflash_host_attrs[] = {
&dev_attr_port0,
&dev_attr_port1,
&dev_attr_port2,
&dev_attr_port3,
&dev_attr_lun_mode,
&dev_attr_ioctl_version,
&dev_attr_port0_lun_table,
&dev_attr_port1_lun_table,
&dev_attr_port2_lun_table,
&dev_attr_port3_lun_table,
&dev_attr_irqpoll_weight,
&dev_attr_num_hwqs,
&dev_attr_hwq_mode,
static struct attribute *cxlflash_host_attrs[] = {
&dev_attr_port0.attr,
&dev_attr_port1.attr,
&dev_attr_port2.attr,
&dev_attr_port3.attr,
&dev_attr_lun_mode.attr,
&dev_attr_ioctl_version.attr,
&dev_attr_port0_lun_table.attr,
&dev_attr_port1_lun_table.attr,
&dev_attr_port2_lun_table.attr,
&dev_attr_port3_lun_table.attr,
&dev_attr_irqpoll_weight.attr,
&dev_attr_num_hwqs.attr,
&dev_attr_hwq_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(cxlflash_host);
/*
* Device attributes
*/
static DEVICE_ATTR_RO(mode);
static struct device_attribute *cxlflash_dev_attrs[] = {
&dev_attr_mode,
static struct attribute *cxlflash_dev_attrs[] = {
&dev_attr_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(cxlflash_dev);
/*
* Host template
*/
@ -3150,8 +3154,8 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = 1, /* No scatter gather support */
.max_sectors = CXLFLASH_MAX_SECTORS,
.shost_attrs = cxlflash_host_attrs,
.sdev_attrs = cxlflash_dev_attrs,
.shost_groups = cxlflash_host_groups,
.sdev_groups = cxlflash_dev_groups,
};
/*


@ -960,8 +960,9 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* and is expected to be held on return.
*
**/
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
struct DeviceCtlBlk *dcb;
struct ScsiReqBlk *srb;
struct AdapterCtlBlk *acb =
@ -995,8 +996,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
goto complete;
}
/* set callback and clear result in the command */
cmd->scsi_done = done;
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);
@ -3336,7 +3335,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
cmd->scsi_done(cmd);
scsi_done(cmd);
waiting_process_next(acb);
}
@ -3367,7 +3366,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
if (force) {
/* For new EH, we normally don't need to give commands back,
* as they all complete or all time out */
p->scsi_done(p);
scsi_done(p);
}
}
if (!list_empty(&dcb->srb_going_list))
@ -3394,7 +3393,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
if (force) {
/* For new EH, we normally don't need to give commands back,
* as they all complete or all time out */
cmd->scsi_done(cmd);
scsi_done(cmd);
}
}
if (!list_empty(&dcb->srb_waiting_list))
@ -4618,6 +4617,7 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* initialise the adapter and everything we need */
if (adapter_init(acb, io_port_base, io_port_len, irq)) {
dprintkl(KERN_INFO, "adapter init failed\n");
acb = NULL;
goto fail;
}


@ -416,12 +416,11 @@ static int adpt_slave_configure(struct scsi_device * device)
return 0;
}
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
static int adpt_queue_lck(struct scsi_cmnd *cmd)
{
adpt_hba* pHba = NULL;
struct adpt_device* pDev = NULL; /* dpt per device information */
cmd->scsi_done = done;
/*
* SCSI REQUEST_SENSE commands will be executed automatically by the
* Host Adapter for any errors, so they should not be executed
@ -431,7 +430,7 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
cmd->result = (DID_OK << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
@ -456,7 +455,7 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
cmd->result = (DID_NO_CONNECT << 16);
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
cmd->device->hostdata = pDev;
@ -2227,7 +2226,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
pHba->name, cmd->cmnd[0]);
cmd->result = (DID_ERROR <<16);
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
}
@ -2451,9 +2450,7 @@ static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
cmd->result |= (dev_status);
if(cmd->scsi_done != NULL){
cmd->scsi_done(cmd);
}
scsi_done(cmd);
}


@ -541,11 +541,9 @@ efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, efct);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
rc = -1;
goto dma_mask_out;
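
The efct probe hunk above folds the legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into dma_set_mask_and_coherent(), which sets both the streaming and coherent masks in one call. A sketch of the usual 64-bit-with-32-bit-fallback probe sequence (illustrative only):

/* Illustrative sketch only; example_* names are not from the patch. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
        /* Prefer 64-bit DMA; both masks are updated by the one call. */
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                return 0;

        dev_warn(&pdev->dev, "falling back to a 32-bit DMA mask\n");
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}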


@ -382,7 +382,7 @@ efct_lio_sg_map(struct efct_io *io)
struct efct_scsi_tgt_io *ocp = &io->tgt_io;
struct se_cmd *cmd = &ocp->cmd;
ocp->seg_map_cnt = pci_map_sg(io->efct->pci, cmd->t_data_sg,
ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
cmd->t_data_nents, cmd->data_direction);
if (ocp->seg_map_cnt == 0)
return -EFAULT;
@ -398,7 +398,7 @@ efct_lio_sg_unmap(struct efct_io *io)
if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
return;
pci_unmap_sg(io->efct->pci, cmd->t_data_sg,
dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
ocp->seg_map_cnt, cmd->data_direction);
ocp->seg_map_cnt = 0;
}
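
Similarly, the efct_lio hunks replace the deprecated pci_map_sg()/pci_unmap_sg() wrappers with the generic DMA API on the underlying struct device. Sketch of the mapping helpers (hypothetical names):

/* Illustrative sketch only; example_* names are not from the patch. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

static int example_map_sgl(struct pci_dev *pdev, struct scatterlist *sgl,
                           int nents, enum dma_data_direction dir)
{
        int count = dma_map_sg(&pdev->dev, sgl, nents, dir);   /* was pci_map_sg() */

        return count ? count : -EFAULT;         /* zero mapped entries means failure */
}

static void example_unmap_sgl(struct pci_dev *pdev, struct scatterlist *sgl,
                              int nents, enum dma_data_direction dir)
{
        dma_unmap_sg(&pdev->dev, sgl, nents, dir);      /* was pci_unmap_sg() */
}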


@ -38,8 +38,6 @@ efct_scsi_io_alloc(struct efct_node *node)
xport = efct->xport;
spin_lock_irqsave(&node->active_ios_lock, flags);
io = efct_io_pool_io_alloc(efct->xport->io_pool);
if (!io) {
efc_log_err(efct, "IO alloc Failed\n");
@ -65,6 +63,7 @@ efct_scsi_io_alloc(struct efct_node *node)
/* Add to node's active_ios list */
INIT_LIST_HEAD(&io->list_entry);
spin_lock_irqsave(&node->active_ios_lock, flags);
list_add(&io->list_entry, &node->active_ios);
spin_unlock_irqrestore(&node->active_ios_lock, flags);


@ -47,6 +47,6 @@ enum efc_scsi_del_target_reason {
#define nport_sm_trace(nport) \
efc_log_debug(nport->efc, \
"[%s] %-20s\n", nport->display_name, efc_sm_event_name(evt)) \
"[%s] %-20s %-20s\n", nport->display_name, __func__, efc_sm_event_name(evt)) \
#endif /* __EFC_H__ */


@ -249,6 +249,7 @@ efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
{
struct efc_nport *nport = arg;
nport->attaching = false;
if (efc_nport_get_mbox_status(nport, mqe, status)) {
efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
return -EIO;
@ -286,6 +287,8 @@ efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
if (rc) {
efc_log_err(efc, "REG_VPI command failure\n");
efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
} else {
nport->attaching = true;
}
return rc;
@ -302,8 +305,10 @@ efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
/* Issue the UNREG_VPI command to free the assigned VPI context */
if (nport->attached)
efc_nport_free_unreg_vpi(nport);
else
else if (nport->attaching)
nport->free_req_pending = true;
else
efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL);
return 0;
}


@ -685,7 +685,7 @@ efc_process_gidpt_payload(struct efc_node *node,
}
/* Allocate a buffer for all nodes */
active_nodes = kzalloc(port_count * sizeof(*active_nodes), GFP_ATOMIC);
active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
if (!active_nodes) {
node_printf(node, "efc_malloc failed\n");
return -EIO;
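
The libefc change above swaps an open-coded kzalloc(n * size) for kcalloc(), which zeroes the buffer and additionally fails cleanly if n * size would overflow. For example (illustrative only):

/* Illustrative sketch only; example_* names are not from the patch. */
#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
        u32 port_id;
};

static struct example_entry *example_alloc(size_t count)
{
        /* Returns NULL instead of a short buffer if count * size overflows. */
        return kcalloc(count, sizeof(struct example_entry), GFP_ATOMIC);
}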


@ -142,6 +142,7 @@ struct efc_nport {
bool is_vport;
bool free_req_pending;
bool attached;
bool attaching;
bool p2p_winner;
struct efc_domain *domain;
u64 wwpn;


@ -828,7 +828,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
@ -988,7 +988,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
scsi_set_resid(cmd, 0);
cmd->scsi_done(cmd);
scsi_done(cmd);
return SUCCESS;
}
@ -1054,7 +1054,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
scsi_set_resid(cmd, 0);
cmd->scsi_done(cmd);
scsi_done(cmd);
return SUCCESS;
}
@ -1535,7 +1535,7 @@ void esas2r_complete_request_cb(struct esas2r_adapter *a,
scsi_set_resid(rq->cmd, 0);
}
rq->cmd->scsi_done(rq->cmd);
scsi_done(rq->cmd);
esas2r_free_request(a, rq);
}


@ -936,7 +936,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
}
}
cmd->scsi_done(cmd);
scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
@ -952,7 +952,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
scsi_track_queue_full(dev, lp->num_tagged - 1);
}
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
static int esp_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct scsi_device *dev = cmd->device;
struct esp *esp = shost_priv(dev->host);
@ -965,8 +965,6 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
ent->cmd = cmd;
cmd->scsi_done = done;
spriv = ESP_CMD_PRIV(cmd);
spriv->num_sg = 0;
@ -2038,7 +2036,7 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
esp_unmap_sense(esp, ent);
cmd->scsi_done(cmd);
scsi_done(cmd);
list_del(&ent->list);
esp_put_ent(esp, ent);
}
@ -2061,7 +2059,7 @@ static void esp_reset_cleanup(struct esp *esp)
list_del(&ent->list);
cmd->result = DID_RESET << 16;
cmd->scsi_done(cmd);
scsi_done(cmd);
esp_put_ent(esp, ent);
}
@ -2535,7 +2533,7 @@ static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
list_del(&ent->list);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd);
scsi_done(cmd);
esp_put_ent(esp, ent);


@ -307,7 +307,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
}
/* Do not support for bonding device */
if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
if (netif_is_bond_master(netdev)) {
FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
return -EOPNOTSUPP;
}


@ -206,7 +206,7 @@ static void fdomain_finish_cmd(struct fdomain *fd)
{
outb(0, fd->base + REG_ICTL);
fdomain_make_bus_idle(fd);
fd->cur_cmd->scsi_done(fd->cur_cmd);
scsi_done(fd->cur_cmd);
fd->cur_cmd = NULL;
}


@ -322,7 +322,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
extern struct device_attribute *fnic_attrs[];
extern const struct attribute_group *fnic_host_groups[];
void fnic_clear_intr_mode(struct fnic *fnic);
int fnic_set_intr_mode(struct fnic *fnic);


@ -48,9 +48,18 @@ static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
struct device_attribute *fnic_attrs[] = {
&dev_attr_fnic_state,
&dev_attr_drv_version,
&dev_attr_link_state,
static struct attribute *fnic_host_attrs[] = {
&dev_attr_fnic_state.attr,
&dev_attr_drv_version.attr,
&dev_attr_link_state.attr,
NULL,
};
static const struct attribute_group fnic_host_attr_group = {
.attrs = fnic_host_attrs
};
const struct attribute_group *fnic_host_groups[] = {
&fnic_host_attr_group,
NULL
};


@ -122,7 +122,7 @@ static struct scsi_host_template fnic_host_template = {
.can_queue = FNIC_DFLT_IO_REQ,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
.shost_attrs = fnic_attrs,
.shost_groups = fnic_host_groups,
.track_queue_depth = 1,
};


@ -420,8 +420,9 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
* Routine to send a scsi cdb
* Called with host_lock held and interrupts disabled.
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
const int tag = scsi_cmd_to_rq(sc)->tag;
struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
@ -560,7 +561,6 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
CMD_SP(sc) = (char *)io_req;
CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
sc->scsi_done = done;
/* create copy wq desc and enqueue it */
wq = &fnic->wq_copy[0];
@ -1051,8 +1051,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
}
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
scsi_done(sc);
}
/* fnic_fcpio_itmf_cmpl_handler
@ -1193,28 +1192,25 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
if (sc->scsi_done) {
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id,
sc,
jiffies_to_msecs(jiffies - start_time),
desc,
(((u64)hdr_status << 40) |
(u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) |
CMD_STATE(sc)));
sc->scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
}
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id,
sc,
jiffies_to_msecs(jiffies - start_time),
desc,
(((u64)hdr_status << 40) |
(u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) |
CMD_STATE(sc)));
scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
CMD_LR_STATUS(sc) = hdr_status;
@ -1421,23 +1417,22 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
atomic64_inc(&fnic_stats->io_stats.io_completions);
/* Complete the command to SCSI */
if (sc->scsi_done) {
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
tag, sc);
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
tag, sc);
FNIC_TRACE(fnic_cleanup_io,
sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
FNIC_TRACE(fnic_cleanup_io,
sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
scsi_done(sc);
sc->scsi_done(sc);
}
return true;
}
@ -1495,17 +1490,15 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
if (sc->scsi_done) {
FNIC_TRACE(fnic_wq_copy_cleanup_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
FNIC_TRACE(fnic_wq_copy_cleanup_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
sc->scsi_done(sc);
}
scsi_done(sc);
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@ -1931,16 +1924,14 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
if (sc->scsi_done) {
/* Call SCSI completion function to complete the IO */
sc->result = (DID_ABORT << 16);
sc->scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
}
sc->result = DID_ABORT << 16;
scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
fnic_abort_cmd_end:
FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
@ -2153,11 +2144,10 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
* Any IO is returned during reset, it needs to call scsi_done
* to return the scsi_cmnd to upper layer.
*/
if (sc->scsi_done) {
/* Set result to let upper SCSI layer retry */
sc->result = DID_RESET << 16;
sc->scsi_done(sc);
}
/* Set result to let upper SCSI layer retry */
sc->result = DID_RESET << 16;
scsi_done(sc);
return true;
}


@ -35,7 +35,7 @@
#define HISI_SAS_QUEUE_SLOTS 4096
#define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_RESETTING_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
#define HISI_SAS_PM_BIT 2
#define HISI_SAS_HW_FAULT_BIT 3
@ -649,6 +649,7 @@ extern int hisi_sas_probe(struct platform_device *pdev,
extern int hisi_sas_remove(struct platform_device *pdev);
extern int hisi_sas_slave_configure(struct scsi_device *sdev);
extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);


@ -724,7 +724,7 @@ static int hisi_sas_init_device(struct domain_device *device)
*/
local_phy = sas_get_local_phy(device);
if (!scsi_is_sas_phy_local(local_phy) &&
!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
unsigned long deadline = ata_deadline(jiffies, 20000);
struct sata_device *sata_dev = &device->sata_dev;
struct ata_host *ata_host = sata_dev->ata_host;
@ -756,6 +756,20 @@ static int hisi_sas_init_device(struct domain_device *device)
return rc;
}
int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
struct domain_device *ddev;
int rc;
rc = sas_slave_alloc(sdev);
if (rc)
return rc;
ddev = sdev_to_domain_dev(sdev);
return hisi_sas_init_device(ddev);
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
static int hisi_sas_dev_found(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
@ -802,9 +816,6 @@ static int hisi_sas_dev_found(struct domain_device *device)
dev_info(dev, "dev[%d:%x] found\n",
sas_dev->device_id, sas_dev->dev_type);
rc = hisi_sas_init_device(device);
if (rc)
goto err_out;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
@ -1072,7 +1083,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
sas_dev->device_id, sas_dev->dev_type);
down(&hisi_hba->sem);
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0, true);
@ -1135,9 +1146,17 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
struct hisi_sas_phy *phy = container_of(sas_phy,
struct hisi_sas_phy, sas_phy);
struct sas_ha_struct *sas_ha = sas_phy->ha;
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct device *dev = hisi_hba->dev;
DECLARE_COMPLETION_ONSTACK(completion);
int phy_no = sas_phy->id;
u8 sts = phy->phy_attached;
int ret = 0;
phy->reset_completion = &completion;
switch (func) {
case PHY_FUNC_HARD_RESET:
@ -1152,26 +1171,40 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_DISABLE:
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
break;
goto out;
case PHY_FUNC_SET_LINK_RATE:
return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
break;
case PHY_FUNC_GET_EVENTS:
if (hisi_hba->hw->get_events) {
hisi_hba->hw->get_events(hisi_hba, phy_no);
break;
goto out;
}
fallthrough;
case PHY_FUNC_RELEASE_SPINUP_HOLD:
default:
return -EOPNOTSUPP;
ret = -EOPNOTSUPP;
goto out;
}
return 0;
if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) {
dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
phy_no, func);
if (phy->in_reset)
ret = -ETIMEDOUT;
}
out:
phy->reset_completion = NULL;
return ret;
}
static void hisi_sas_task_done(struct sas_task *task)
{
del_timer(&task->slow_task->timer);
del_timer_sync(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@ -1229,7 +1262,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
if (res) {
del_timer(&task->slow_task->timer);
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "abort tmf: executing internal task failed: %d\n",
res);
goto ex_err;
@ -1554,8 +1587,7 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
if (timer_pending(&hisi_hba->timer))
del_timer_sync(&hisi_hba->timer);
del_timer_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
@ -1576,7 +1608,7 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
hisi_sas_reset_init_all_devices(hisi_hba);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
@ -1587,7 +1619,7 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
if (!hisi_hba->hw->soft_reset)
return -1;
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
@ -1611,7 +1643,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return rc;
}
@ -1773,7 +1805,6 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
DECLARE_COMPLETION_ONSTACK(phyreset);
int rc, reset_type;
if (!local_phy->enabled) {
@ -1786,8 +1817,11 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
phy->in_reset = 1;
phy->reset_completion = &phyreset;
spin_unlock_irqrestore(&phy->lock, flags);
}
reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
@ -1801,17 +1835,14 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
int ret = wait_for_completion_timeout(&phyreset,
I_T_NEXUS_RESET_PHYUP_TIMEOUT);
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
phy->reset_completion = NULL;
phy->in_reset = 0;
spin_unlock_irqrestore(&phy->lock, flags);
/* report PHY down if timed out */
if (!ret)
if (rc == -ETIMEDOUT)
hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
/*
@ -1839,13 +1870,32 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
}
hisi_sas_dereg_device(hisi_hba, device);
if (dev_is_sata(device)) {
rc = hisi_sas_softreset_ata_disk(device);
if (rc == TMF_RESP_FUNC_FAILED)
return TMF_RESP_FUNC_FAILED;
}
rc = hisi_sas_debug_I_T_nexus_reset(device);
if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
struct sas_phy *local_phy;
rc = hisi_sas_softreset_ata_disk(device);
switch (rc) {
case -ECOMM:
rc = -ENODEV;
break;
case TMF_RESP_FUNC_FAILED:
case -EMSGSIZE:
case -EIO:
local_phy = sas_get_local_phy(device);
rc = sas_phy_enable(local_phy, 0);
if (!rc) {
local_phy->enabled = 0;
dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
SAS_ADDR(device->sas_addr), rc);
rc = -ENODEV;
}
sas_put_local_phy(local_phy);
break;
default:
break;
}
}
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
hisi_sas_release_task(hisi_hba, device);
@ -2097,7 +2147,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
task, abort_flag, tag, dq);
if (res) {
del_timer(&task->slow_task->timer);
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
res);
goto exit;
@ -2251,7 +2301,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
} else {
struct hisi_sas_port *port = phy->port;
if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
@ -2769,8 +2819,7 @@ int hisi_sas_remove(struct platform_device *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
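
Several hisi_sas hunks above convert del_timer() to del_timer_sync() on teardown and error paths: plain del_timer() can return while the timer callback is still running on another CPU, so paths that free or reuse the timer's data must use the synchronous form. Sketch (illustrative only):

/* Illustrative sketch only; example_* names are not from the patch. */
#include <linux/timer.h>

static void example_stop_timer(struct timer_list *timer)
{
        /*
         * Returns only after any concurrently running callback has
         * finished; must not be called while holding a lock that the
         * callback itself takes.
         */
        del_timer_sync(timer);
}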


@ -1327,7 +1327,6 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
irqreturn_t res = IRQ_HANDLED;
unsigned long flags;
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) {
@ -1380,15 +1379,9 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
CHL_INT2_SL_PHY_ENA_MSK);
@ -1422,7 +1415,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
@ -1749,11 +1742,13 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
return 0;
}
static struct device_attribute *host_attrs_v1_hw[] = {
&dev_attr_phy_event_threshold,
static struct attribute *host_v1_hw_attrs[] = {
&dev_attr_phy_event_threshold.attr,
NULL
};
ATTRIBUTE_GROUPS(host_v1_hw);
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@ -1771,13 +1766,13 @@ static struct scsi_host_template sht_v1_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
.shost_attrs = host_attrs_v1_hw,
.shost_groups = host_v1_hw_groups,
.host_reset = hisi_sas_host_reset,
};


@ -2368,18 +2368,18 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case STAT_IO_COMPLETE:
/* internal abort command complete */
ts->stat = TMF_RESP_FUNC_SUCC;
del_timer(&slot->internal_abort_timer);
del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NO_DEVICE:
ts->stat = TMF_RESP_FUNC_COMPLETE;
del_timer(&slot->internal_abort_timer);
del_timer_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NOT_VALID:
/* abort single io, controller don't find
* the io need to abort
*/
ts->stat = TMF_RESP_FUNC_FAILED;
del_timer(&slot->internal_abort_timer);
del_timer_sync(&slot->internal_abort_timer);
goto out;
default:
break;
@ -2641,7 +2641,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct device *dev = hisi_hba->dev;
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@ -2696,14 +2695,9 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
set_link_timer_quirk(hisi_hba);
}
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
@ -2824,7 +2818,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@ -3204,7 +3198,6 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
irqreturn_t res = IRQ_HANDLED;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
unsigned long flags;
int phy_no, offset;
del_timer(&phy->timer);
@ -3280,12 +3273,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
if (phy->reset_completion)
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
@ -3542,11 +3531,13 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
}
static struct device_attribute *host_attrs_v2_hw[] = {
&dev_attr_phy_event_threshold,
static struct attribute *host_v2_hw_attrs[] = {
&dev_attr_phy_event_threshold.attr,
NULL
};
ATTRIBUTE_GROUPS(host_v2_hw);
static int map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
@ -3584,13 +3575,13 @@ static struct scsi_host_template sht_v2_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
.shost_attrs = host_attrs_v2_hw,
.shost_groups = host_v2_hw_groups,
.host_reset = hisi_sas_host_reset,
.map_queues = map_queues_v2_hw,
.host_tagset = 1,
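
The hisi_sas v2 hunks above show the sysfs conversion this series applies across every driver: the NULL-terminated array of struct device_attribute pointers becomes a plain struct attribute array, ATTRIBUTE_GROUPS() wraps it into the generated <name>_groups[] table, and the host template points at that table through the new shost_groups field instead of the removed shost_attrs. A minimal sketch of the pattern, using a hypothetical "example" attribute rather than the driver's own:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <scsi/scsi_host.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);

/* Raw attributes; ATTRIBUTE_GROUPS(example_host) generates example_host_group
 * and the NULL-terminated example_host_groups[] table used below.
 */
static struct attribute *example_host_attrs[] = {
	&dev_attr_example.attr,
	NULL
};
ATTRIBUTE_GROUPS(example_host);

static struct scsi_host_template example_sht = {
	.name		= "example",
	.shost_groups	= example_host_groups,	/* was .shost_attrs = ... */
};

The same transformation is applied to sdev_attrs/sdev_groups where drivers expose per-device attributes, as in the hpsa hunks further down.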

View File

@ -519,6 +519,8 @@ struct hisi_sas_err_record_v3 {
#define CHNL_INT_STS_INT2_MSK BIT(3)
#define CHNL_WIDTH 4
#define BAR_NO_V3_HW 5
enum {
DSM_FUNC_ERR_HANDLE_MSI = 0,
};
@ -1481,7 +1483,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
unsigned long flags;
del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
@ -1563,13 +1564,9 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->phy_attached = 1;
hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
res = IRQ_HANDLED;
spin_lock_irqsave(&phy->lock, flags);
if (phy->reset_completion) {
phy->in_reset = 0;
complete(phy->reset_completion);
}
spin_unlock_irqrestore(&phy->lock, flags);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_PHY_ENABLE_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
@ -1616,7 +1613,7 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@ -2770,14 +2767,16 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
return 0;
}
static struct device_attribute *host_attrs_v3_hw[] = {
&dev_attr_phy_event_threshold,
&dev_attr_intr_conv_v3_hw,
&dev_attr_intr_coal_ticks_v3_hw,
&dev_attr_intr_coal_count_v3_hw,
static struct attribute *host_v3_hw_attrs[] = {
&dev_attr_phy_event_threshold.attr,
&dev_attr_intr_conv_v3_hw.attr,
&dev_attr_intr_coal_ticks_v3_hw.attr,
&dev_attr_intr_coal_count_v3_hw.attr,
NULL
};
ATTRIBUTE_GROUPS(host_v3_hw);
#define HISI_SAS_DEBUGFS_REG(x) {#x, x}
struct hisi_sas_debugfs_reg_lu {
@ -3156,13 +3155,13 @@ static struct scsi_host_template sht_v3_hw = {
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_target_reset_handler = sas_eh_target_reset_handler,
.slave_alloc = sas_slave_alloc,
.slave_alloc = hisi_sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
.shost_attrs = host_attrs_v3_hw,
.shost_groups = host_v3_hw_groups,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
@ -3687,7 +3686,6 @@ static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
do_div(timestamp, NSEC_PER_MSEC);
hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
hisi_hba->debugfs_dump_index++;
debugfs_snapshot_prepare_v3_hw(hisi_hba);
@ -3703,6 +3701,7 @@ static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
debugfs_create_files_v3_hw(hisi_hba);
debugfs_snapshot_restore_v3_hw(hisi_hba);
hisi_hba->debugfs_dump_index++;
}
static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
@ -4677,15 +4676,15 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct sas_ha_struct *sha;
int rc, phy_nr, port_nr, i;
rc = pci_enable_device(pdev);
rc = pcim_enable_device(pdev);
if (rc)
goto err_out;
pci_set_master(pdev);
rc = pci_request_regions(pdev, DRV_NAME);
rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME);
if (rc)
goto err_out_disable_device;
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
@ -4693,20 +4692,20 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
goto err_out_regions;
goto err_out;
}
shost = hisi_sas_shost_alloc_pci(pdev);
if (!shost) {
rc = -ENOMEM;
goto err_out_regions;
goto err_out;
}
sha = SHOST_TO_SAS_HA(shost);
hisi_hba = shost_priv(shost);
dev_set_drvdata(dev, sha);
hisi_hba->regs = pcim_iomap(pdev, 5, 0);
hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW];
if (!hisi_hba->regs) {
dev_err(dev, "cannot map register\n");
rc = -ENOMEM;
@ -4761,7 +4760,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = interrupt_preinit_v3_hw(hisi_hba);
if (rc)
goto err_out_debugfs;
dev_err(dev, "%d hw queues\n", shost->nr_hw_queues);
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_free_irq_vectors;
@ -4800,10 +4799,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_out_ha:
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
err_out_regions:
pci_release_regions(pdev);
err_out_disable_device:
pci_disable_device(pdev);
err_out:
return rc;
}
@ -4833,16 +4828,13 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct Scsi_Host *shost = sha->core.shost;
pm_runtime_get_noresume(dev);
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
debugfs_exit_v3_hw(hisi_hba);
scsi_host_put(shost);
@ -4856,7 +4848,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
rc = disable_host_v3_hw(hisi_hba);
@ -4902,7 +4894,7 @@ static int _suspend_v3_hw(struct device *device)
return -ENODEV;
}
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
scsi_block_requests(shost);
@ -4913,7 +4905,7 @@ static int _suspend_v3_hw(struct device *device)
if (rc) {
dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
scsi_unblock_requests(shost);
return rc;
}
@ -4952,7 +4944,7 @@ static int _resume_v3_hw(struct device *device)
}
phys_init_v3_hw(hisi_hba);
sas_resume_ha(sha);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return 0;
}
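
The v3 probe and remove hunks switch to the managed (devres) PCI helpers, which is why the err_out_regions/err_out_disable_device labels and the explicit pci_release_regions()/pci_disable_device() calls disappear: resources claimed through pcim_*() are released automatically when probe fails or the device is unbound. The remove path also switches del_timer() to del_timer_sync(), which waits for a concurrently running timer handler before teardown continues. A rough sketch of the managed pattern, with a hypothetical driver name and BAR index (the driver's own constant is the BAR_NO_V3_HW added in the header hunk above):

#include <linux/pci.h>

#define EXAMPLE_BAR	5	/* hypothetical; stands in for BAR_NO_V3_HW */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);	/* pci_disable_device() is automatic */
	if (rc)
		return rc;

	pci_set_master(pdev);

	/* Request + ioremap only the BAR in the mask; released automatically. */
	rc = pcim_iomap_regions(pdev, 1 << EXAMPLE_BAR, "example");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[EXAMPLE_BAR];
	if (!regs)
		return -ENOMEM;

	/* ... rest of probe; error paths can simply return ... */
	return 0;
}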

View File

@ -377,7 +377,7 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
int index;
int index, i, j = 0;
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
if (!shost)
@ -476,12 +476,23 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
shost->shost_gendev.type = &scsi_host_type;
scsi_enable_async_suspend(&shost->shost_gendev);
device_initialize(&shost->shost_dev);
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
shost->shost_dev.groups = shost->shost_dev_attr_groups;
shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;
if (sht->shost_groups) {
for (i = 0; sht->shost_groups[i] &&
j < ARRAY_SIZE(shost->shost_dev_attr_groups);
i++, j++) {
shost->shost_dev_attr_groups[j] =
sht->shost_groups[i];
}
}
WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
@ -667,7 +678,7 @@ static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
scsi_dma_unmap(scmd);
scmd->result = 0;
set_host_byte(scmd, status);
scmd->scsi_done(scmd);
scsi_done(scmd);
return true;
}
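
In hosts.c, scsi_host_alloc() now assembles the shost_dev sysfs groups itself: the scsi core's own scsi_shost_attr_group always occupies the first slot of shost->shost_dev_attr_groups[], any NULL-terminated shost_groups array supplied by the host template is copied in after it, and the WARN_ON_ONCE() fires if the template supplied more groups than fit, leaving no room for the terminating NULL. The hunk above is the authoritative code; the same logic pulled out into a standalone helper purely for readability (it is a paraphrase, not separately buildable outside the scsi core):

static void example_fill_shost_groups(struct Scsi_Host *shost,
				      struct scsi_host_template *sht)
{
	int i, j = 0;

	shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;

	if (sht->shost_groups) {
		for (i = 0; sht->shost_groups[i] &&
			    j < ARRAY_SIZE(shost->shost_dev_attr_groups);
		     i++, j++)
			shost->shost_dev_attr_groups[j] = sht->shost_groups[i];
	}

	/* At least one slot must remain for the NULL terminator. */
	WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));

	shost->shost_dev.groups = shost->shost_dev_attr_groups;
}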

View File

@ -936,30 +936,34 @@ static DEVICE_ATTR(ctlr_num, S_IRUGO,
static DEVICE_ATTR(legacy_board, S_IRUGO,
host_show_legacy_board, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
&dev_attr_lunid,
&dev_attr_unique_id,
&dev_attr_hp_ssd_smart_path_enabled,
&dev_attr_path_info,
&dev_attr_sas_address,
static struct attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level.attr,
&dev_attr_lunid.attr,
&dev_attr_unique_id.attr,
&dev_attr_hp_ssd_smart_path_enabled.attr,
&dev_attr_path_info.attr,
&dev_attr_sas_address.attr,
NULL,
};
static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_rescan,
&dev_attr_firmware_revision,
&dev_attr_commands_outstanding,
&dev_attr_transport_mode,
&dev_attr_resettable,
&dev_attr_hp_ssd_smart_path_status,
&dev_attr_raid_offload_debug,
&dev_attr_lockup_detected,
&dev_attr_ctlr_num,
&dev_attr_legacy_board,
ATTRIBUTE_GROUPS(hpsa_sdev);
static struct attribute *hpsa_shost_attrs[] = {
&dev_attr_rescan.attr,
&dev_attr_firmware_revision.attr,
&dev_attr_commands_outstanding.attr,
&dev_attr_transport_mode.attr,
&dev_attr_resettable.attr,
&dev_attr_hp_ssd_smart_path_status.attr,
&dev_attr_raid_offload_debug.attr,
&dev_attr_lockup_detected.attr,
&dev_attr_ctlr_num.attr,
&dev_attr_legacy_board.attr,
NULL,
};
ATTRIBUTE_GROUPS(hpsa_shost);
#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
HPSA_MAX_CONCURRENT_PASSTHRUS)
@ -980,8 +984,8 @@ static struct scsi_host_template hpsa_driver_template = {
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
.sdev_groups = hpsa_sdev_groups,
.shost_groups = hpsa_shost_groups,
.max_sectors = 2048,
.no_write_same = 1,
};
@ -2482,8 +2486,8 @@ static void hpsa_cmd_free_and_done(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd)
{
hpsa_cmd_resolve_and_free(h, c);
if (cmd && cmd->scsi_done)
cmd->scsi_done(cmd);
if (cmd)
scsi_done(cmd);
}
static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
@ -5671,7 +5675,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
* if it encountered a dma mapping failure.
*/
cmd->result = DID_IMM_RETRY << 16;
cmd->scsi_done(cmd);
scsi_done(cmd);
}
}
@ -5691,19 +5695,19 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
if (dev->removed) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
if (unlikely(lockup_detected(h))) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
scsi_done(cmd);
return 0;
}
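
The hpsa hunks are part of the tree-wide sweep called out in the merge description: completion now goes through the exported scsi_done() helper rather than the per-command cmd->scsi_done function pointer, so every fast-fail path in queuecommand simply sets a result and calls scsi_done(). A minimal sketch of the new-style fast-fail path; the example_ name and the bare hostdata check are illustrative, not hpsa's code:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int example_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	if (!cmd->device->hostdata) {
		/* No pointer chase through cmd->scsi_done any more. */
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);
		return 0;
	}

	/* ... build a controller request and hand it to the hardware ... */
	return 0;
}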

View File

@ -769,7 +769,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
skip_resid:
dprintk("scsi_done(%p)\n", scp);
scp->scsi_done(scp);
scsi_done(scp);
free_req(hba, &hba->reqs[tag]);
}
@ -993,8 +993,7 @@ static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
return 0;
}
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
{
struct Scsi_Host *host = scp->device->host;
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
@ -1002,9 +1001,6 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
int sg_count = 0;
struct hptiop_request *_req;
BUG_ON(!done);
scp->scsi_done = done;
_req = get_req(hba);
if (_req == NULL) {
dprintk("hptiop_queuecmd : no free req\n");
@ -1059,7 +1055,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
cmd_done:
dprintk("scsi_done(scp=%p)\n", scp);
scp->scsi_done(scp);
scsi_done(scp);
return 0;
}
@ -1150,12 +1146,14 @@ static struct device_attribute hptiop_attr_fw_version = {
.show = hptiop_show_fw_version,
};
static struct device_attribute *hptiop_attrs[] = {
&hptiop_attr_version,
&hptiop_attr_fw_version,
static struct attribute *hptiop_host_attrs[] = {
&hptiop_attr_version.attr,
&hptiop_attr_fw_version.attr,
NULL
};
ATTRIBUTE_GROUPS(hptiop_host);
static int hptiop_slave_config(struct scsi_device *sdev)
{
if (sdev->type == TYPE_TAPE)
@ -1172,7 +1170,7 @@ static struct scsi_host_template driver_template = {
.info = hptiop_info,
.emulated = 0,
.proc_name = driver_name,
.shost_attrs = hptiop_attrs,
.shost_groups = hptiop_host_groups,
.slave_configure = hptiop_slave_config,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,

View File

@ -1046,7 +1046,7 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
if (cmnd) {
scsi_dma_unmap(cmnd);
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
}
ibmvfc_free_event(evt);
@ -1849,7 +1849,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->result = (DID_ERROR << 16);
scsi_dma_unmap(cmnd);
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
}
ibmvfc_free_event(evt);
@ -1935,7 +1935,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
cmnd->result = rc;
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
return 0;
}
@ -1975,7 +1975,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
"Failed to map DMA buffer for command. rc=%d\n", rc);
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
scsi_done(cmnd);
return 0;
}
@ -3589,18 +3589,20 @@ static struct bin_attribute ibmvfc_trace_attr = {
};
#endif
static struct device_attribute *ibmvfc_attrs[] = {
&dev_attr_partition_name,
&dev_attr_device_name,
&dev_attr_port_loc_code,
&dev_attr_drc_name,
&dev_attr_npiv_version,
&dev_attr_capabilities,
&dev_attr_log_level,
&dev_attr_nr_scsi_channels,
static struct attribute *ibmvfc_host_attrs[] = {
&dev_attr_partition_name.attr,
&dev_attr_device_name.attr,
&dev_attr_port_loc_code.attr,
&dev_attr_drc_name.attr,
&dev_attr_npiv_version.attr,
&dev_attr_capabilities.attr,
&dev_attr_log_level.attr,
&dev_attr_nr_scsi_channels.attr,
NULL
};
ATTRIBUTE_GROUPS(ibmvfc_host);
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IBM POWER Virtual FC Adapter",
@ -3621,7 +3623,7 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = IBMVFC_MAX_SECTORS,
.shost_attrs = ibmvfc_attrs,
.shost_groups = ibmvfc_host_groups,
.track_queue_depth = 1,
.host_tagset = 1,
};

View File

@ -454,7 +454,7 @@ static int initialize_event_pool(struct event_pool *pool,
pool->iu_storage =
dma_alloc_coherent(hostdata->dev,
pool->size * sizeof(*pool->iu_storage),
&pool->iu_token, 0);
&pool->iu_token, GFP_KERNEL);
if (!pool->iu_storage) {
kfree(pool->events);
return -ENOMEM;
@ -1039,9 +1039,9 @@ static inline u16 lun_from_dev(struct scsi_device *dev)
* @cmnd: struct scsi_cmnd to be executed
* @done: Callback function to be called when cmd is completed
*/
static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
void (*done) (struct scsi_cmnd *))
static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct;
struct srp_indirect_buf *indirect;
@ -2065,18 +2065,20 @@ static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
return 0;
}
static struct device_attribute *ibmvscsi_attrs[] = {
&ibmvscsi_host_vhost_loc,
&ibmvscsi_host_vhost_name,
&ibmvscsi_host_srp_version,
&ibmvscsi_host_partition_name,
&ibmvscsi_host_partition_number,
&ibmvscsi_host_mad_version,
&ibmvscsi_host_os_type,
&ibmvscsi_host_config,
static struct attribute *ibmvscsi_host_attrs[] = {
&ibmvscsi_host_vhost_loc.attr,
&ibmvscsi_host_vhost_name.attr,
&ibmvscsi_host_srp_version.attr,
&ibmvscsi_host_partition_name.attr,
&ibmvscsi_host_partition_number.attr,
&ibmvscsi_host_mad_version.attr,
&ibmvscsi_host_os_type.attr,
&ibmvscsi_host_config.attr,
NULL
};
ATTRIBUTE_GROUPS(ibmvscsi_host);
/* ------------------------------------------------------------
* SCSI driver registration
*/
@ -2096,7 +2098,7 @@ static struct scsi_host_template driver_template = {
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
.sg_tablesize = SG_ALL,
.shost_attrs = ibmvscsi_attrs,
.shost_groups = ibmvscsi_host_groups,
};
/**
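
The first ibmvscsi hunk fixes the allocation flags for the event pool: the last argument of dma_alloc_coherent() is a gfp_t, so the literal 0 meant "no flags at all", and GFP_KERNEL is the appropriate choice in this sleepable setup path. For reference, the call shape with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_pool {
	size_t		size;		/* number of IUs in the pool */
	void		*iu_storage;
	dma_addr_t	iu_token;
};

static int example_pool_alloc(struct device *dev, struct example_pool *pool,
			      size_t iu_size)
{
	pool->iu_storage = dma_alloc_coherent(dev, pool->size * iu_size,
					      &pool->iu_token, GFP_KERNEL);
	return pool->iu_storage ? 0 : -ENOMEM;
}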

View File

@ -3948,41 +3948,16 @@ static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
NULL,
};
static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct ibmvscsis_tport *tport = container_of(se_tpg,
struct ibmvscsis_tport,
se_tpg);
return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
}
static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
const char *page, size_t count)
static int ibmvscsis_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
struct se_portal_group *se_tpg = to_tpg(item);
struct ibmvscsis_tport *tport = container_of(se_tpg,
struct ibmvscsis_tport,
se_tpg);
struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
unsigned long tmp;
int rc;
long lrc;
rc = kstrtoul(page, 0, &tmp);
if (rc < 0) {
dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
return -EINVAL;
}
if (tmp) {
if (enable) {
spin_lock_bh(&vscsi->intr_lock);
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
@ -3998,17 +3973,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
vscsi->state);
return count;
return 0;
}
CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
&ibmvscsis_tpg_attr_enable,
NULL,
};
static const struct target_core_fabric_ops ibmvscsis_ops = {
.module = THIS_MODULE,
@ -4038,10 +4004,10 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
.fabric_make_wwn = ibmvscsis_make_tport,
.fabric_drop_wwn = ibmvscsis_drop_tport,
.fabric_make_tpg = ibmvscsis_make_tpg,
.fabric_enable_tpg = ibmvscsis_enable_tpg,
.fabric_drop_tpg = ibmvscsis_drop_tpg,
.tfc_wwn_attrs = ibmvscsis_wwn_attrs,
.tfc_tpg_base_attrs = ibmvscsis_tpg_attrs,
};
static void ibmvscsis_dev_release(struct device *dev) {};
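
The ibmvscsis (target) hunk drops the driver's private configfs "enable" attribute in favour of the target core's fabric_enable_tpg() callback: the kstrtoul() parsing and the 0/1 validation move into common code, and the fabric driver only reacts to the already-parsed bool. The callback shape, as a sketch with hypothetical example_ names (the remaining mandatory fabric ops are elided):

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static int example_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
	/* container_of(se_tpg, ...) back to the fabric's own tpg, as
	 * ibmvscsis_enable_tpg() does, then bring the port up or down.
	 */
	if (enable) {
		/* enable path */
	} else {
		/* quiesce path */
	}
	return 0;
}

static const struct target_core_fabric_ops example_ops = {
	.module			= THIS_MODULE,
	.fabric_enable_tpg	= example_enable_tpg,
	/* ... other callbacks ... */
};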

View File

@ -769,7 +769,7 @@ static void imm_interrupt(struct work_struct *work)
spin_lock_irqsave(host->host_lock, flags);
dev->cur_cmd = NULL;
cmd->scsi_done(cmd);
scsi_done(cmd);
spin_unlock_irqrestore(host->host_lock, flags);
return;
}
@ -910,8 +910,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
static int imm_queuecommand_lck(struct scsi_cmnd *cmd)
{
imm_struct *dev = imm_dev(cmd->device->host);
@ -922,7 +921,6 @@ static int imm_queuecommand_lck(struct scsi_cmnd *cmd,
dev->failed = 0;
dev->jstart = jiffies;
dev->cur_cmd = cmd;
cmd->scsi_done = done;
cmd->result = DID_ERROR << 16; /* default return code */
cmd->SCp.phase = 0; /* bus free */

View File

@ -2609,14 +2609,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
* will cause the mid layer to call us again later with the command)
*/
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
struct scsi_ctrl_blk *cmnd;
cmd->scsi_done = done;
cmnd = initio_alloc_scb(host);
if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
@ -2788,7 +2785,7 @@ static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
cmnd->result = cblk->tastat | (cblk->hastat << 16);
i91u_unmap_scb(host->pci_dev, cmnd);
cmnd->scsi_done(cmnd); /* Notify system DONE */
scsi_done(cmnd); /* Notify system DONE */
initio_release_scb(host, cblk); /* Release SCB for current channel */
}

View File

@ -866,7 +866,7 @@ static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@ -4236,18 +4236,20 @@ static struct bin_attribute ipr_ioa_async_err_log = {
.write = ipr_next_async_err_log
};
static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_fw_version_attr,
&ipr_log_level_attr,
&ipr_diagnostics_attr,
&ipr_ioa_state_attr,
&ipr_ioa_reset_attr,
&ipr_update_fw_attr,
&ipr_ioa_fw_type_attr,
&ipr_iopoll_weight_attr,
static struct attribute *ipr_ioa_attrs[] = {
&ipr_fw_version_attr.attr,
&ipr_log_level_attr.attr,
&ipr_diagnostics_attr.attr,
&ipr_ioa_state_attr.attr,
&ipr_ioa_reset_attr.attr,
&ipr_update_fw_attr.attr,
&ipr_ioa_fw_type_attr.attr,
&ipr_iopoll_weight_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(ipr_ioa);
#ifdef CONFIG_SCSI_IPR_DUMP
/**
* ipr_read_dump - Dump the adapter
@ -4732,15 +4734,17 @@ static struct device_attribute ipr_raw_mode_attr = {
.store = ipr_store_raw_mode
};
static struct device_attribute *ipr_dev_attrs[] = {
&ipr_adapter_handle_attr,
&ipr_resource_path_attr,
&ipr_device_id_attr,
&ipr_resource_type_attr,
&ipr_raw_mode_attr,
static struct attribute *ipr_dev_attrs[] = {
&ipr_adapter_handle_attr.attr,
&ipr_resource_path_attr.attr,
&ipr_device_id_attr.attr,
&ipr_resource_type_attr.attr,
&ipr_raw_mode_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(ipr_dev);
/**
* ipr_biosparam - Return the HSC mapping
* @sdev: scsi device struct
@ -6065,7 +6069,7 @@ static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@ -6502,7 +6506,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@ -6531,7 +6535,7 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
scsi_cmd->scsi_done(scsi_cmd);
scsi_done(scsi_cmd);
if (ipr_cmd->eh_comp)
complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
@ -6685,7 +6689,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
spin_lock_irqsave(hrrq->lock, hrrq_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
scsi_done(scsi_cmd);
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
}
@ -6762,8 +6766,8 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = IPR_MAX_SGLIST,
.max_sectors = IPR_IOA_MAX_SECTORS,
.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
.shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs,
.shost_groups = ipr_ioa_groups,
.sdev_groups = ipr_dev_groups,
.proc_name = IPR_NAME,
};

View File

@ -936,7 +936,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_ERROR << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@ -946,7 +946,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
scsi_cmd->result = DID_ERROR;
scsi_cmd->scsi_done(scsi_cmd);
scsi_done(scsi_cmd);
}
ha->active = FALSE;
@ -965,7 +965,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_ERROR << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@ -975,7 +975,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) {
scsi_cmd->result = DID_ERROR << 16;
scsi_cmd->scsi_done(scsi_cmd);
scsi_done(scsi_cmd);
}
ha->active = FALSE;
@ -994,7 +994,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) {
scb->scsi_cmd->result = DID_RESET << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
@ -1035,8 +1035,9 @@ static int ips_eh_reset(struct scsi_cmnd *SC)
/* Linux obtains io_request_lock before calling this function */
/* */
/****************************************************************************/
static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
static int ips_queue_lck(struct scsi_cmnd *SC)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
ips_ha_t *ha;
ips_passthru_t *pt;
@ -1064,8 +1065,6 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
return (0);
}
SC->scsi_done = done;
DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)",
ips_name,
ha->host_num,
@ -1099,7 +1098,7 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
ha->ioctl_reset = 1; /* This reset request is from an IOCTL */
__ips_eh_reset(SC);
SC->result = DID_OK << 16;
SC->scsi_done(SC);
scsi_done(SC);
return (0);
}
@ -2579,7 +2578,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@ -2587,7 +2586,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_OK << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@ -2712,7 +2711,7 @@ ips_next(ips_ha_t * ha, int intr)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
}
if (scb->bus)
@ -2723,7 +2722,7 @@ ips_next(ips_ha_t * ha, int intr)
break;
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd)
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
if (scb->bus)
ha->dcdb_active[scb->bus - 1] &=
@ -3206,7 +3205,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
case IPS_FAILURE:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@ -3214,7 +3213,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
case IPS_SUCCESS_IMM:
if (scb->scsi_cmd) {
scb->scsi_cmd->result = DID_ERROR << 16;
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
}
ips_freescb(ha, scb);
@ -3231,7 +3230,7 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id);
}
scb->scsi_cmd->scsi_done(scb->scsi_cmd);
scsi_done(scb->scsi_cmd);
ips_freescb(ha, scb);
}
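
ips (above), together with the hptiop, imm, initio and ibmvscsi hunks earlier, shows the other half of the scsi_done() conversion for drivers that still use the locked queuecommand wrapper: the *_lck() helper no longer takes a done callback, because the DEF_SCSI_QCMD() wrapper (updated elsewhere in this series) and the completion paths can call scsi_done() directly. A sketch of the new shape with hypothetical example_ names:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Runs under the host lock taken by the DEF_SCSI_QCMD() wrapper below. */
static int example_queuecommand_lck(struct scsi_cmnd *cmd)
{
	if (!cmd->device->hostdata) {
		cmd->result = DID_NO_CONNECT << 16;
		scsi_done(cmd);		/* was cmd->scsi_done(cmd) */
		return 0;
	}

	/* ... queue the command to the adapter ... */
	return 0;
}

static DEF_SCSI_QCMD(example_queuecommand)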

View File

@ -142,11 +142,13 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c
static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
static struct device_attribute *isci_host_attrs[] = {
&dev_attr_isci_id,
static struct attribute *isci_host_attrs[] = {
&dev_attr_isci_id.attr,
NULL
};
ATTRIBUTE_GROUPS(isci_host);
static struct scsi_host_template isci_sht = {
.module = THIS_MODULE,
@ -173,7 +175,7 @@ static struct scsi_host_template isci_sht = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sas_ioctl,
#endif
.shost_attrs = isci_host_attrs,
.shost_groups = isci_host_groups,
.track_queue_depth = 1,
};

View File

@ -182,8 +182,4 @@ void *isci_task_ssp_request_get_response_data_address(
u32 isci_task_ssp_request_get_response_data_length(
struct isci_request *request);
int isci_queuecommand(
struct scsi_cmnd *scsi_cmd,
void (*donefunc)(struct scsi_cmnd *));
#endif /* !defined(_SCI_TASK_H_) */

View File

@ -1870,7 +1870,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
return 0;
}
@ -1880,7 +1880,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
goto out;
}
@ -2087,7 +2087,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
list_del(&fsp->list);
sc_cmd->SCp.ptr = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->scsi_done(sc_cmd);
scsi_done(sc_cmd);
/* release ref from initial allocation in queue command */
fc_fcp_pkt_release(fsp);

View File

@ -468,7 +468,7 @@ static void iscsi_free_task(struct iscsi_task *task)
* it will decide how to return sc to scsi-ml.
*/
if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
sc->scsi_done(sc);
scsi_done(sc);
}
}
@ -1807,7 +1807,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
scsi_set_resid(sc, scsi_bufflen(sc));
sc->scsi_done(sc);
scsi_done(sc);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_queuecommand);
@ -2950,6 +2950,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->tmf_state = TMF_INITIAL;
timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
mutex_init(&session->eh_mutex);
init_waitqueue_head(&session->ehwait);
spin_lock_init(&session->frwd_lock);
spin_lock_init(&session->back_lock);
@ -3077,8 +3078,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
init_waitqueue_head(&session->ehwait);
return cls_conn;
login_task_data_alloc_fail:

View File

@ -147,6 +147,7 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
return error;
}
EXPORT_SYMBOL_GPL(sas_register_ha);
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
@ -176,6 +177,7 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
return 0;
}
EXPORT_SYMBOL_GPL(sas_unregister_ha);
static int sas_get_linkerrors(struct sas_phy *phy)
{
@ -252,7 +254,7 @@ static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
}
static int sas_phy_enable(struct sas_phy *phy, int enable)
int sas_phy_enable(struct sas_phy *phy, int enable)
{
int ret;
enum phy_func cmd;
@ -284,6 +286,7 @@ static int sas_phy_enable(struct sas_phy *phy, int enable)
}
return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_enable);
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
@ -313,6 +316,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
}
return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_reset);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates)
@ -659,5 +663,3 @@ MODULE_LICENSE("GPL v2");
module_init(sas_class_init);
module_exit(sas_class_exit);
EXPORT_SYMBOL_GPL(sas_register_ha);
EXPORT_SYMBOL_GPL(sas_unregister_ha);
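
The libsas hunks make sas_phy_enable() part of the exported API and move the EXPORT_SYMBOL_GPL() statements for sas_register_ha()/sas_unregister_ha() from the bottom of the file to sit directly after the functions they export, the usual kernel convention, which is why the two trailing export lines above are deleted. Schematic shape only, not the real function body:

#include <linux/module.h>
#include <scsi/libsas.h>

int sas_phy_enable(struct sas_phy *phy, int enable)
{
	/* issue PHY_FUNC_LINK_RESET or PHY_FUNC_DISABLE, as before */
	return 0;
}
EXPORT_SYMBOL_GPL(sas_phy_enable);	/* export kept next to the definition */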

Some files were not shown because too many files have changed in this diff.