Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (73 commits)
  [SCSI] aic79xx: Add ASC-29320LPE ids to driver
  [SCSI] stex: version update
  [SCSI] stex: change wait loop code
  [SCSI] stex: add new device type support
  [SCSI] stex: update device id info
  [SCSI] stex: adjust default queue length
  [SCSI] stex: add value check in hard reset routine
  [SCSI] stex: fix controller_info command handling
  [SCSI] stex: fix biosparam calculation
  [SCSI] megaraid: fix MMIO casts
  [SCSI] tgt: fix undefined flush_dcache_page() problem
  [SCSI] libsas: better error handling in sas_expander.c
  [SCSI] lpfc 8.1.11 : Change version number to 8.1.11
  [SCSI] lpfc 8.1.11 : Misc Fixes
  [SCSI] lpfc 8.1.11 : Add soft_wwnn sysfs attribute, rename soft_wwn_enable
  [SCSI] lpfc 8.1.11 : Removed decoding of PCI Subsystem Id
  [SCSI] lpfc 8.1.11 : Add MSI (Message Signalled Interrupts) support
  [SCSI] lpfc 8.1.11 : Adjust LOG_FCP logging
  [SCSI] lpfc 8.1.11 : Fix Memory leaks
  [SCSI] lpfc 8.1.11 : Fix lpfc_multi_ring_support
  ...
Linus Torvalds 2006-12-05 16:09:46 -08:00
commit ec0bf39a47
83 changed files with 4763 additions and 1129 deletions


@ -1416,6 +1416,11 @@ and is between 256 and 4096 characters. It is defined in the file
scsi_logging= [SCSI]
scsi_mod.scan= [SCSI] sync (default) scans SCSI busses as they are
discovered. async scans them in kernel threads,
allowing boot to proceed. none ignores them, expecting
user space to do the scan.
selinux [SELINUX] Disable or enable SELinux at boot time.
Format: { "0" | "1" }
See security/selinux/Kconfig help text.
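Referring to the scsi_mod.scan entry above: assuming a typical boot loader, asynchronous scanning would be requested on the kernel command line like so (illustrative root device):

    root=/dev/sda1 ro scsi_mod.scan=async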


@ -375,7 +375,6 @@ Summary:
scsi_add_device - creates new scsi device (lu) instance
scsi_add_host - perform sysfs registration and set up transport class
scsi_adjust_queue_depth - change the queue depth on a SCSI device
scsi_assign_lock - replace default host_lock with given lock
scsi_bios_ptable - return copy of block device's partition table
scsi_block_requests - prevent further commands being queued to given host
scsi_deactivate_tcq - turn off tag command queueing
@ -488,20 +487,6 @@ void scsi_adjust_queue_depth(struct scsi_device * sdev, int tagged,
int tags)
/**
* scsi_assign_lock - replace default host_lock with given lock
* @shost: a pointer to a scsi host instance
* @lock: pointer to lock to replace host_lock for this host
*
* Returns nothing
*
* Might block: no
*
* Defined in: include/scsi/scsi_host.h .
**/
void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
/**
* scsi_bios_ptable - return copy of block device's partition table
* @dev: pointer to block device
@ -1366,17 +1351,11 @@ Locks
Each struct Scsi_Host instance has a spin_lock called struct
Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in
hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer
is initialized to point at default_lock with the scsi_assign_lock() function.
Thereafter lock and unlock operations performed by the mid level use the
struct Scsi_Host::host_lock pointer.
is initialized to point at default_lock. Thereafter lock and unlock
operations performed by the mid level use the struct Scsi_Host::host_lock
pointer. Previously, drivers could override the host_lock pointer,
but this is no longer allowed.
LLDs can override the use of struct Scsi_Host::default_lock by
using scsi_assign_lock(). The earliest opportunity to do this would
be in the detect() function after it has invoked scsi_register(). It
could be replaced by a coarser grain lock (e.g. per driver) or a
lock of equal granularity (i.e. per host). Using finer grain locks
(e.g. per SCSI device) may be possible by juggling locks in
queuecommand().
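For reference, the replacement initialization in scsi_host_alloc() (see the hosts.c hunk later in this commit) reduces to two lines:

    shost->host_lock = &shost->default_lock;
    spin_lock_init(shost->host_lock);   /* mid level locks only via host_lock */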
Autosense
=========


@ -277,7 +277,7 @@ static int sg_io(struct file *file, request_queue_t *q,
if (rq->bio)
blk_queue_bounce(q, &rq->bio);
rq->timeout = (hdr->timeout * HZ) / 1000;
rq->timeout = jiffies_to_msecs(hdr->timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)


@ -624,6 +624,8 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
* successful */
if (result == 0)
result = cmnd[7];
/* restore the original length */
SCp->cmd_len = cmnd[8];
} else
NCR_700_unmap(hostdata, SCp, slot);
@ -1007,6 +1009,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
* of the command */
cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
cmnd[7] = hostdata->status[0];
cmnd[8] = SCp->cmd_len;
SCp->cmd_len = 6; /* command length for
* REQUEST_SENSE */
slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));


@ -2186,21 +2186,21 @@ static int __init BusLogic_init(void)
if (BusLogic_ProbeOptions.NoProbe)
return -ENODEV;
BusLogic_ProbeInfoList = (struct BusLogic_ProbeInfo *)
kmalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_ATOMIC);
BusLogic_ProbeInfoList =
kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL);
if (BusLogic_ProbeInfoList == NULL) {
BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
return -ENOMEM;
}
memset(BusLogic_ProbeInfoList, 0, BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo));
PrototypeHostAdapter = (struct BusLogic_HostAdapter *)
kmalloc(sizeof(struct BusLogic_HostAdapter), GFP_ATOMIC);
PrototypeHostAdapter =
kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL);
if (PrototypeHostAdapter == NULL) {
kfree(BusLogic_ProbeInfoList);
BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL);
return -ENOMEM;
}
memset(PrototypeHostAdapter, 0, sizeof(struct BusLogic_HostAdapter));
#ifdef MODULE
if (BusLogic != NULL)
BusLogic_Setup(BusLogic);
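For reference, the conversion above is the standard kzalloc() idiom: kzalloc(size, flags) behaves like kmalloc(size, flags) followed by memset(ptr, 0, size), so the explicit memset calls become redundant (generic sketch, not BusLogic-specific):

    void *p = kmalloc(size, GFP_KERNEL);    /* old: allocate ...  */
    if (p)
        memset(p, 0, size);                 /* ... then zero      */
    void *q = kzalloc(size, GFP_KERNEL);    /* new: one call      */

The switch from GFP_ATOMIC to GFP_KERNEL is a separate relaxation; module init runs in process context and may sleep.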


@ -29,6 +29,13 @@ config SCSI
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a SCSI device.
config SCSI_TGT
tristate "SCSI target support"
depends on SCSI && EXPERIMENTAL
---help---
If you want to use SCSI target mode drivers, enable this option.
If you choose M, the module will be called scsi_tgt.
config SCSI_NETLINK
bool
default n
@ -216,6 +223,23 @@ config SCSI_LOGGING
there should be no noticeable performance impact as long as you have
logging turned off.
config SCSI_SCAN_ASYNC
bool "Asynchronous SCSI scanning"
depends on SCSI
help
The SCSI subsystem can probe for devices while the rest of the
system continues booting, and even probe devices on different
busses in parallel, leading to a significant speed-up.
If you have built SCSI as modules, enabling this option can
be a problem as the devices may not have been found by the
time your system expects them to have been. You can load the
scsi_wait_scan module to ensure that all scans have completed.
If you build your SCSI drivers into the kernel, then everything
will work fine if you say Y here.
You can override this choice by specifying scsi_mod.scan="sync"
or "async" on the kernel's command line.
menu "SCSI Transports"
depends on SCSI
@ -797,6 +821,20 @@ config SCSI_IBMVSCSI
To compile this driver as a module, choose M here: the
module will be called ibmvscsic.
config SCSI_IBMVSCSIS
tristate "IBM Virtual SCSI Server support"
depends on PPC_PSERIES && SCSI_TGT && SCSI_SRP
help
This is the SRP target driver for IBM pSeries virtual environments.
The userspace component needed to initialize the driver, along
with documentation, can be found at:
http://stgt.berlios.de/
To compile this driver as a module, choose M here: the
module will be called ibmvstgt.
config SCSI_INITIO
tristate "Initio 9100U(W) support"
depends on PCI && SCSI
@ -944,8 +982,13 @@ config SCSI_STEX
tristate "Promise SuperTrak EX Series support"
depends on PCI && SCSI
---help---
This driver supports Promise SuperTrak EX8350/8300/16350/16300
Storage controllers.
This driver supports Promise SuperTrak EX series storage controllers.
Promise provides a Linux RAID configuration utility for these
controllers; visit <http://www.promise.com> to download it.
To compile this driver as a module, choose M here: the
module will be called stex.
config SCSI_SYM53C8XX_2
tristate "SYM53C8XX Version 2 SCSI support"
@ -1026,6 +1069,7 @@ config SCSI_IPR
config SCSI_IPR_TRACE
bool "enable driver internal trace"
depends on SCSI_IPR
default y
help
If you say Y here, the driver will trace all commands issued
to the adapter. Performance impact is minimal. Trace can be
@ -1034,6 +1078,7 @@ config SCSI_IPR_TRACE
config SCSI_IPR_DUMP
bool "enable adapter dump support"
depends on SCSI_IPR
default y
help
If you say Y here, the driver will support adapter crash dump.
If you enable this support, the iprdump daemon can be used
@ -1734,6 +1779,16 @@ config ZFCP
called zfcp. If you want to compile it as a module, say M here
and read <file:Documentation/modules.txt>.
config SCSI_SRP
tristate "SCSI RDMA Protocol helper library"
depends on SCSI && PCI
select SCSI_TGT
help
If you wish to use SRP target drivers, say Y.
To compile this driver as a module, choose M here: the
module will be called libsrp.
endmenu
source "drivers/scsi/pcmcia/Kconfig"


@ -21,6 +21,7 @@ CFLAGS_seagate.o = -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
subdir-$(CONFIG_PCMCIA) += pcmcia
obj-$(CONFIG_SCSI) += scsi_mod.o
obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
@ -125,7 +126,9 @@ obj-$(CONFIG_SCSI_FCAL) += fcal.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_SRP) += libsrp.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
@ -141,6 +144,8 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
obj-$(CONFIG_SCSI) += scsi_wait_scan.o
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
scsicam.o scsi_error.o scsi_lib.o \
scsi_scan.o scsi_sysfs.o \
@ -149,6 +154,8 @@ scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
sd_mod-objs := sd.o
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \


@ -220,9 +220,11 @@ static void *addresses[] = {
static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 };
#define PORT_COUNT ARRAY_SIZE(ports)
#ifndef MODULE
/* possible interrupt channels */
static unsigned short intrs[] = { 10, 11, 12, 15 };
#define INTR_COUNT ARRAY_SIZE(intrs)
#endif /* !MODULE */
/* signatures for NCR 53c406a based controllers */
#if USE_BIOS
@ -605,6 +607,7 @@ static int NCR53c406a_release(struct Scsi_Host *shost)
return 0;
}
#ifndef MODULE
/* called from init/main.c */
static int __init NCR53c406a_setup(char *str)
{
@ -661,6 +664,8 @@ static int __init NCR53c406a_setup(char *str)
__setup("ncr53c406a=", NCR53c406a_setup);
#endif /* !MODULE */
static const char *NCR53c406a_info(struct Scsi_Host *SChost)
{
DEB(printk("NCR53c406a_info called\n"));


@ -11,8 +11,8 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 2409
# define AAC_DRIVER_BRANCH "-mh2"
# define AAC_DRIVER_BUILD 2423
# define AAC_DRIVER_BRANCH "-mh3"
#endif
#define MAXIMUM_NUM_CONTAINERS 32


@ -518,6 +518,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
*/
unsigned long count = 36000000L; /* 3 minutes */
while (down_trylock(&fibptr->event_wait)) {
int blink;
if (--count == 0) {
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
@ -530,6 +531,14 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
return -ETIMEDOUT;
}
if ((blink = aac_adapter_check_health(dev)) > 0) {
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
"Usually a result of a serious unrecoverable hardware problem\n",
blink);
}
return -EFAULT;
}
udelay(5);
}
} else if (down_interruptible(&fibptr->event_wait)) {
@ -1093,6 +1102,20 @@ static int _aac_reset_adapter(struct aac_dev *aac)
goto out;
}
/*
* Loop through the fibs, close the synchronous FIBS
*/
for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
struct fib *fib = &aac->fibs[index];
if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
(fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
up(&fib->event_wait);
spin_unlock_irqrestore(&fib->event_lock, flagv);
schedule();
}
}
index = aac->cardtype;
/*


@ -586,7 +586,7 @@ static struct scsi_host_template aha1740_template = {
static int aha1740_probe (struct device *dev)
{
int slotbase;
int slotbase, rc;
unsigned int irq_level, irq_type, translation;
struct Scsi_Host *shpnt;
struct aha1740_hostdata *host;
@ -641,10 +641,16 @@ static int aha1740_probe (struct device *dev)
}
eisa_set_drvdata (edev, shpnt);
scsi_add_host (shpnt, dev); /* XXX handle failure */
rc = scsi_add_host (shpnt, dev);
if (rc)
goto err_irq;
scsi_scan_host (shpnt);
return 0;
err_irq:
free_irq(irq_level, shpnt);
err_unmap:
dma_unmap_single (&edev->dev, host->ecb_dma_addr,
sizeof (host->ecb), DMA_BIDIRECTIONAL);


@ -62,6 +62,7 @@ static struct pci_device_id ahd_linux_pci_id_table[] = {
/* aic7901 based controllers */
ID(ID_AHA_29320A),
ID(ID_AHA_29320ALP),
ID(ID_AHA_29320LPE),
/* aic7902 based controllers */
ID(ID_AHA_29320),
ID(ID_AHA_29320B),


@ -109,7 +109,13 @@ static struct ahd_pci_identity ahd_pci_ident_table [] =
{
ID_AHA_29320ALP,
ID_ALL_MASK,
"Adaptec 29320ALP Ultra320 SCSI adapter",
"Adaptec 29320ALP PCIx Ultra320 SCSI adapter",
ahd_aic7901_setup
},
{
ID_AHA_29320LPE,
ID_ALL_MASK,
"Adaptec 29320LPE PCIe Ultra320 SCSI adapter",
ahd_aic7901_setup
},
/* aic7901A based controllers */


@ -51,6 +51,7 @@
#define ID_AIC7901 0x800F9005FFFF9005ull
#define ID_AHA_29320A 0x8000900500609005ull
#define ID_AHA_29320ALP 0x8017900500449005ull
#define ID_AHA_29320LPE 0x8017900500459005ull
#define ID_AIC7901A 0x801E9005FFFF9005ull
#define ID_AHA_29320LP 0x8014900500449005ull


@ -724,6 +724,15 @@ static void asd_free_queues(struct asd_ha_struct *asd_ha)
list_for_each_safe(pos, n, &pending) {
struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
/*
* Delete unexpired ascb timers. This may happen if we issue
* a CONTROL PHY scb to an adapter and rmmod before the scb
* times out. Apparently we don't wait for the CONTROL PHY
* to complete, so it doesn't matter if we kill the timer.
*/
del_timer_sync(&ascb->timer);
WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
list_del_init(pos);
ASD_DPRINTK("freeing from pending\n");
asd_ascb_free(ascb);


@ -25,6 +25,7 @@
*/
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include "aic94xx.h"
#include "aic94xx_reg.h"
@ -412,6 +413,39 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
}
}
/* hard reset a phy later */
static void do_phy_reset_later(void *data)
{
struct sas_phy *sas_phy = data;
int error;
ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
sas_phy->identify.phy_identifier);
/* Reset device port */
error = sas_phy_reset(sas_phy, 1);
if (error)
ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
__FUNCTION__, sas_phy->identify.phy_identifier, error);
}
static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
{
INIT_WORK(&sas_phy->reset_work, do_phy_reset_later, sas_phy);
queue_work(shost->work_q, &sas_phy->reset_work);
}
/* start up the ABORT TASK tmf... */
static void task_kill_later(struct asd_ascb *ascb)
{
struct asd_ha_struct *asd_ha = ascb->ha;
struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_task *task = ascb->uldd_task;
INIT_WORK(&task->abort_work, (void (*)(void *))sas_task_abort, task);
queue_work(shost->work_q, &task->abort_work);
}
static void escb_tasklet_complete(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
@ -439,6 +473,74 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
ascb->scb->header.opcode);
}
/* Catch these before we mask off the sb_opcode bits */
switch (sb_opcode) {
case REQ_TASK_ABORT: {
struct asd_ascb *a, *b;
u16 tc_abort;
tc_abort = *((u16*)(&dl->status_block[1]));
tc_abort = le16_to_cpu(tc_abort);
ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
__FUNCTION__, dl->status_block[3]);
/* Find the pending task and abort it. */
list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list)
if (a->tc_index == tc_abort) {
task_kill_later(a);
break;
}
goto out;
}
case REQ_DEVICE_RESET: {
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_phy *dev_phy;
struct asd_ascb *a;
u16 conn_handle;
conn_handle = *((u16*)(&dl->status_block[1]));
conn_handle = le16_to_cpu(conn_handle);
ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
dl->status_block[3]);
/* Kill all pending tasks and reset the device */
dev_phy = NULL;
list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
struct sas_task *task;
struct domain_device *dev;
u16 x;
task = a->uldd_task;
if (!task)
continue;
dev = task->dev;
x = (unsigned long)dev->lldd_dev;
if (x == conn_handle) {
dev_phy = dev->port->phy;
task_kill_later(a);
}
}
/* Reset device port */
if (!dev_phy) {
ASD_DPRINTK("%s: No pending commands; can't reset.\n",
__FUNCTION__);
goto out;
}
phy_reset_later(dev_phy, shost);
goto out;
}
case SIGNAL_NCQ_ERROR:
ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
goto out;
case CLEAR_NCQ_ERROR:
ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
goto out;
}
sb_opcode &= ~DL_PHY_MASK;
switch (sb_opcode) {
@ -469,22 +571,6 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
asd_deform_port(asd_ha, phy);
sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
break;
case REQ_TASK_ABORT:
ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
phy_id);
break;
case REQ_DEVICE_RESET:
ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
phy_id);
break;
case SIGNAL_NCQ_ERROR:
ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
phy_id);
break;
case CLEAR_NCQ_ERROR:
ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
phy_id);
break;
default:
ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
phy_id, sb_opcode);
@ -504,7 +590,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
break;
}
out:
asd_invalidate_edb(ascb, edb);
}


@ -294,6 +294,7 @@ static struct Scsi_Host *hosts[FD_MAX_HOSTS + 1] = { NULL };
static int user_fifo_count = 0;
static int user_fifo_size = 0;
#ifndef MODULE
static int __init fd_mcs_setup(char *str)
{
static int done_setup = 0;
@ -311,6 +312,7 @@ static int __init fd_mcs_setup(char *str)
}
__setup("fd_mcs=", fd_mcs_setup);
#endif /* !MODULE */
static void print_banner(struct Scsi_Host *shpnt)
{


@ -263,6 +263,10 @@ static void scsi_host_dev_release(struct device *dev)
kthread_stop(shost->ehandler);
if (shost->work_q)
destroy_workqueue(shost->work_q);
if (shost->uspace_req_q) {
kfree(shost->uspace_req_q->queuedata);
scsi_free_queue(shost->uspace_req_q);
}
scsi_destroy_command_freelist(shost);
if (shost->bqt)
@ -301,8 +305,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
if (!shost)
return NULL;
spin_lock_init(&shost->default_lock);
scsi_assign_lock(shost, &shost->default_lock);
shost->host_lock = &shost->default_lock;
spin_lock_init(shost->host_lock);
shost->shost_state = SHOST_CREATED;
INIT_LIST_HEAD(&shost->__devices);
INIT_LIST_HEAD(&shost->__targets);


@ -3,3 +3,5 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o
ibmvscsic-y += ibmvscsi.o
ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o


@ -0,0 +1,958 @@
/*
* IBM eServer i/pSeries Virtual SCSI Target Driver
* Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
* Santiago Leon (santil@us.ibm.com) IBM Corp.
* Linda Xie (lxie@us.ibm.com) IBM Corp.
*
* Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <scsi/libsrp.h>
#include <asm/hvcall.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/vio.h>
#include "ibmvscsi.h"
#define INITIAL_SRP_LIMIT 16
#define DEFAULT_MAX_SECTORS 512
#define TGT_NAME "ibmvstgt"
/*
* Hypervisor calls.
*/
#define h_copy_rdma(l, sa, sb, da, db) \
plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
#define h_send_crq(ua, l, h) \
plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
#define h_reg_crq(ua, tok, sz)\
plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
#define h_free_crq(ua) \
plpar_hcall_norets(H_FREE_CRQ, ua);
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
struct vio_port {
struct vio_dev *dma_dev;
struct crq_queue crq_queue;
struct work_struct crq_work;
unsigned long liobn;
unsigned long riobn;
};
static struct workqueue_struct *vtgtd;
/*
* These are fixed for the system and come from the Open Firmware device tree.
* We just store them here to save getting them every time.
*/
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
static struct vio_port *target_to_port(struct srp_target *target)
{
return (struct vio_port *) target->ldata;
}
static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
{
return (union viosrp_iu *) (iue->sbuf->buf);
}
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
struct srp_target *target = iue->target;
struct vio_port *vport = target_to_port(target);
long rc, rc1;
union {
struct viosrp_crq cooked;
uint64_t raw[2];
} crq;
/* First copy the SRP */
rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
vport->riobn, iue->remote_token);
if (rc)
eprintk("Error %ld transferring data\n", rc);
crq.cooked.valid = 0x80;
crq.cooked.format = format;
crq.cooked.reserved = 0x00;
crq.cooked.timeout = 0x00;
crq.cooked.IU_length = length;
crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;
if (rc == 0)
crq.cooked.status = 0x99; /* Just needs to be non-zero */
else
crq.cooked.status = 0x00;
rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
if (rc1) {
eprintk("%ld sending response\n", rc1);
return rc1;
}
return rc;
}
#define SRP_RSP_SENSE_DATA_LEN 18
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
unsigned char status, unsigned char asc)
{
union viosrp_iu *iu = vio_iu(iue);
uint64_t tag = iu->srp.rsp.tag;
/* If the linked bit is on and status is good */
if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
status = 0x10;
memset(iu, 0, sizeof(struct srp_rsp));
iu->srp.rsp.opcode = SRP_RSP;
iu->srp.rsp.req_lim_delta = 1;
iu->srp.rsp.tag = tag;
if (test_bit(V_DIOVER, &iue->flags))
iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
iu->srp.rsp.data_in_res_cnt = 0;
iu->srp.rsp.data_out_res_cnt = 0;
iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
iu->srp.rsp.resp_data_len = 0;
iu->srp.rsp.status = status;
if (status) {
uint8_t *sense = iu->srp.rsp.data;
if (sc) {
iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
} else {
iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;
/* Valid bit and 'current errors' */
sense[0] = (0x1 << 7 | 0x70);
/* Sense key */
sense[2] = status;
/* Additional sense length */
sense[7] = 0xa; /* 10 bytes */
/* Additional sense code */
sense[12] = asc;
}
}
send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
VIOSRP_SRP_FORMAT);
return 0;
}
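As a worked example of the sense buffer synthesized above: the ILLEGAL_REQUEST response sent from process_tsk_mgmt() below (status 0x05, asc 0x20) yields, within the 18-byte fixed-format block:

    sense[0]  = 0xF0   /* valid bit | 0x70 (current errors)    */
    sense[2]  = 0x05   /* sense key: ILLEGAL REQUEST           */
    sense[7]  = 0x0a   /* 10 bytes of additional sense follow  */
    sense[12] = 0x20   /* ASC: INVALID COMMAND OPERATION CODE  */

with all remaining bytes zero.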
static void handle_cmd_queue(struct srp_target *target)
{
struct Scsi_Host *shost = target->shost;
struct iu_entry *iue;
struct srp_cmd *cmd;
unsigned long flags;
int err;
retry:
spin_lock_irqsave(&target->lock, flags);
list_for_each_entry(iue, &target->cmd_queue, ilist) {
if (!test_and_set_bit(V_FLYING, &iue->flags)) {
spin_unlock_irqrestore(&target->lock, flags);
cmd = iue->sbuf->buf;
err = srp_cmd_queue(shost, cmd, iue, 0);
if (err) {
eprintk("cannot queue cmd %p %d\n", cmd, err);
srp_iu_put(iue);
}
goto retry;
}
}
spin_unlock_irqrestore(&target->lock, flags);
}
static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
struct srp_direct_buf *md, int nmd,
enum dma_data_direction dir, unsigned int rest)
{
struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
struct srp_target *target = iue->target;
struct vio_port *vport = target_to_port(target);
dma_addr_t token;
long err;
unsigned int done = 0;
int i, sidx, soff;
sidx = soff = 0;
token = sg_dma_address(sg + sidx);
for (i = 0; i < nmd && rest; i++) {
unsigned int mdone, mlen;
mlen = min(rest, md[i].len);
for (mdone = 0; mlen;) {
int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
if (dir == DMA_TO_DEVICE)
err = h_copy_rdma(slen,
vport->riobn,
md[i].va + mdone,
vport->liobn,
token + soff);
else
err = h_copy_rdma(slen,
vport->liobn,
token + soff,
vport->riobn,
md[i].va + mdone);
if (err != H_SUCCESS) {
eprintk("rdma error %d %d\n", dir, slen);
goto out;
}
mlen -= slen;
mdone += slen;
soff += slen;
done += slen;
if (soff == sg_dma_len(sg + sidx)) {
sidx++;
soff = 0;
token = sg_dma_address(sg + sidx);
if (sidx > nsg) {
eprintk("out of sg %p %d %d\n",
iue, sidx, nsg);
goto out;
}
}
};
rest -= mlen;
}
out:
return 0;
}
static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
void (*done)(struct scsi_cmnd *))
{
struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
int err;
err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
done(sc);
return err;
}
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
void (*done)(struct scsi_cmnd *))
{
unsigned long flags;
struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
struct srp_target *target = iue->target;
dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
spin_lock_irqsave(&target->lock, flags);
list_del(&iue->ilist);
spin_unlock_irqrestore(&target->lock, flags);
if (sc->result != SAM_STAT_GOOD) {
eprintk("operation failed %p %d %x\n",
iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
} else
send_rsp(iue, sc, NO_SENSE, 0x00);
done(sc);
srp_iu_put(iue);
return 0;
}
int send_adapter_info(struct iu_entry *iue,
dma_addr_t remote_buffer, uint16_t length)
{
struct srp_target *target = iue->target;
struct vio_port *vport = target_to_port(target);
struct Scsi_Host *shost = target->shost;
dma_addr_t data_token;
struct mad_adapter_info_data *info;
int err;
info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
GFP_KERNEL);
if (!info) {
eprintk("bad dma_alloc_coherent %p\n", target);
return 1;
}
/* Get remote info */
err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
vport->liobn, data_token);
if (err == H_SUCCESS) {
dprintk("Client connect: %s (%d)\n",
info->partition_name, info->partition_number);
}
memset(info, 0, sizeof(*info));
strcpy(info->srp_version, "16.a");
strncpy(info->partition_name, partition_name,
sizeof(info->partition_name));
info->partition_number = partition_number;
info->mad_version = 1;
info->os_type = 2;
info->port_max_txu[0] = shost->hostt->max_sectors << 9;
/* Send our info to remote */
err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
vport->riobn, remote_buffer);
dma_free_coherent(target->dev, sizeof(*info), info, data_token);
if (err != H_SUCCESS) {
eprintk("Error sending adapter info %d\n", err);
return 1;
}
return 0;
}
static void process_login(struct iu_entry *iue)
{
union viosrp_iu *iu = vio_iu(iue);
struct srp_login_rsp *rsp = &iu->srp.login_rsp;
uint64_t tag = iu->srp.rsp.tag;
/* TODO handle case that requested size is wrong and
* buffer format is wrong
*/
memset(iu, 0, sizeof(struct srp_login_rsp));
rsp->opcode = SRP_LOGIN_RSP;
rsp->req_lim_delta = INITIAL_SRP_LIMIT;
rsp->tag = tag;
rsp->max_it_iu_len = sizeof(union srp_iu);
rsp->max_ti_iu_len = sizeof(union srp_iu);
/* direct and indirect */
rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
static inline void queue_cmd(struct iu_entry *iue)
{
struct srp_target *target = iue->target;
unsigned long flags;
spin_lock_irqsave(&target->lock, flags);
list_add_tail(&iue->ilist, &target->cmd_queue);
spin_unlock_irqrestore(&target->lock, flags);
}
static int process_tsk_mgmt(struct iu_entry *iue)
{
union viosrp_iu *iu = vio_iu(iue);
int fn;
dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
case SRP_TSK_ABORT_TASK:
fn = ABORT_TASK;
break;
case SRP_TSK_ABORT_TASK_SET:
fn = ABORT_TASK_SET;
break;
case SRP_TSK_CLEAR_TASK_SET:
fn = CLEAR_TASK_SET;
break;
case SRP_TSK_LUN_RESET:
fn = LOGICAL_UNIT_RESET;
break;
case SRP_TSK_CLEAR_ACA:
fn = CLEAR_ACA;
break;
default:
fn = 0;
}
if (fn)
scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
iu->srp.tsk_mgmt.task_tag,
(struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
iue);
else
send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
return !fn;
}
static int process_mad_iu(struct iu_entry *iue)
{
union viosrp_iu *iu = vio_iu(iue);
struct viosrp_adapter_info *info;
struct viosrp_host_config *conf;
switch (iu->mad.empty_iu.common.type) {
case VIOSRP_EMPTY_IU_TYPE:
eprintk("%s\n", "Unsupported EMPTY MAD IU");
break;
case VIOSRP_ERROR_LOG_TYPE:
eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
iu->mad.error_log.common.status = 1;
send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
break;
case VIOSRP_ADAPTER_INFO_TYPE:
info = &iu->mad.adapter_info;
info->common.status = send_adapter_info(iue, info->buffer,
info->common.length);
send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
break;
case VIOSRP_HOST_CONFIG_TYPE:
conf = &iu->mad.host_config;
conf->common.status = 1;
send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
break;
default:
eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
}
return 1;
}
static int process_srp_iu(struct iu_entry *iue)
{
union viosrp_iu *iu = vio_iu(iue);
int done = 1;
u8 opcode = iu->srp.rsp.opcode;
switch (opcode) {
case SRP_LOGIN_REQ:
process_login(iue);
break;
case SRP_TSK_MGMT:
done = process_tsk_mgmt(iue);
break;
case SRP_CMD:
queue_cmd(iue);
done = 0;
break;
case SRP_LOGIN_RSP:
case SRP_I_LOGOUT:
case SRP_T_LOGOUT:
case SRP_RSP:
case SRP_CRED_REQ:
case SRP_CRED_RSP:
case SRP_AER_REQ:
case SRP_AER_RSP:
eprintk("Unsupported type %u\n", opcode);
break;
default:
eprintk("Unknown type %u\n", opcode);
}
return done;
}
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
struct vio_port *vport = target_to_port(target);
struct iu_entry *iue;
long err, done;
iue = srp_iu_get(target);
if (!iue) {
eprintk("Error getting IU from pool, %p\n", target);
return;
}
iue->remote_token = crq->IU_data_ptr;
err = h_copy_rdma(crq->IU_length, vport->riobn,
iue->remote_token, vport->liobn, iue->sbuf->dma);
if (err != H_SUCCESS) {
eprintk("%ld transferring data error %p\n", err, iue);
done = 1;
goto out;
}
if (crq->format == VIOSRP_MAD_FORMAT)
done = process_mad_iu(iue);
else
done = process_srp_iu(iue);
out:
if (done)
srp_iu_put(iue);
}
static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
{
struct srp_target *target = (struct srp_target *) data;
struct vio_port *vport = target_to_port(target);
vio_disable_interrupts(vport->dma_dev);
queue_work(vtgtd, &vport->crq_work);
return IRQ_HANDLED;
}
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
int err;
struct vio_port *vport = target_to_port(target);
queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
if (!queue->msgs)
goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
queue->msg_token = dma_map_single(target->dev, queue->msgs,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(queue->msg_token))
goto map_failed;
err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
PAGE_SIZE);
/* If the adapter was left active for some reason (like kexec)
* try freeing and re-registering
*/
if (err == H_RESOURCE) {
do {
err = h_free_crq(vport->dma_dev->unit_address);
} while (err == H_BUSY || H_IS_LONG_BUSY(err));
err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
PAGE_SIZE);
}
if (err != H_SUCCESS && err != 2) {
eprintk("Error 0x%x opening virtual adapter\n", err);
goto reg_crq_failed;
}
err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
SA_INTERRUPT, "ibmvstgt", target);
if (err)
goto req_irq_failed;
vio_enable_interrupts(vport->dma_dev);
h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
queue->cur = 0;
spin_lock_init(&queue->lock);
return 0;
req_irq_failed:
do {
err = h_free_crq(vport->dma_dev->unit_address);
} while (err == H_BUSY || H_IS_LONG_BUSY(err));
reg_crq_failed:
dma_unmap_single(target->dev, queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long) queue->msgs);
malloc_failed:
return -ENOMEM;
}
static void crq_queue_destroy(struct srp_target *target)
{
struct vio_port *vport = target_to_port(target);
struct crq_queue *queue = &vport->crq_queue;
int err;
free_irq(vport->dma_dev->irq, target);
do {
err = h_free_crq(vport->dma_dev->unit_address);
} while (err == H_BUSY || H_IS_LONG_BUSY(err));
dma_unmap_single(target->dev, queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
free_page((unsigned long) queue->msgs);
}
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
struct vio_port *vport = target_to_port(target);
dprintk("%x %x\n", crq->valid, crq->format);
switch (crq->valid) {
case 0xC0:
/* initialization */
switch (crq->format) {
case 0x01:
h_send_crq(vport->dma_dev->unit_address,
0xC002000000000000, 0);
break;
case 0x02:
break;
default:
eprintk("Unknown format %u\n", crq->format);
}
break;
case 0xFF:
/* transport event */
break;
case 0x80:
/* real payload */
switch (crq->format) {
case VIOSRP_SRP_FORMAT:
case VIOSRP_MAD_FORMAT:
process_iu(crq, target);
break;
case VIOSRP_OS400_FORMAT:
case VIOSRP_AIX_FORMAT:
case VIOSRP_LINUX_FORMAT:
case VIOSRP_INLINE_FORMAT:
eprintk("Unsupported format %u\n", crq->format);
break;
default:
eprintk("Unknown format %u\n", crq->format);
}
break;
default:
eprintk("unknown message type 0x%02x!?\n", crq->valid);
}
}
static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
{
struct viosrp_crq *crq;
unsigned long flags;
spin_lock_irqsave(&queue->lock, flags);
crq = &queue->msgs[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
} else
crq = NULL;
spin_unlock_irqrestore(&queue->lock, flags);
return crq;
}
static void handle_crq(void *data)
{
struct srp_target *target = (struct srp_target *) data;
struct vio_port *vport = target_to_port(target);
struct viosrp_crq *crq;
int done = 0;
while (!done) {
while ((crq = next_crq(&vport->crq_queue)) != NULL) {
process_crq(crq, target);
crq->valid = 0x00;
}
vio_enable_interrupts(vport->dma_dev);
crq = next_crq(&vport->crq_queue);
if (crq) {
vio_disable_interrupts(vport->dma_dev);
process_crq(crq, target);
crq->valid = 0x00;
} else
done = 1;
}
handle_cmd_queue(target);
}
static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
{
unsigned long flags;
struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
struct srp_target *target = iue->target;
dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
spin_lock_irqsave(&target->lock, flags);
list_del(&iue->ilist);
spin_unlock_irqrestore(&target->lock, flags);
srp_iu_put(iue);
return 0;
}
static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
{
struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
union viosrp_iu *iu = vio_iu(iue);
unsigned char status, asc;
eprintk("%p %d\n", iue, result);
status = NO_SENSE;
asc = 0;
switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
case SRP_TSK_ABORT_TASK:
asc = 0x14;
if (result)
status = ABORTED_COMMAND;
break;
default:
break;
}
send_rsp(iue, NULL, status, asc);
srp_iu_put(iue);
return 0;
}
static ssize_t system_id_show(struct class_device *cdev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
}
static ssize_t partition_number_show(struct class_device *cdev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
}
static ssize_t unit_address_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct srp_target *target = host_to_srp_target(shost);
struct vio_port *vport = target_to_port(target);
return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
}
static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
static struct class_device_attribute *ibmvstgt_attrs[] = {
&class_device_attr_system_id,
&class_device_attr_partition_number,
&class_device_attr_unit_address,
NULL,
};
static struct scsi_host_template ibmvstgt_sht = {
.name = TGT_NAME,
.module = THIS_MODULE,
.can_queue = INITIAL_SRP_LIMIT,
.sg_tablesize = SG_ALL,
.use_clustering = DISABLE_CLUSTERING,
.max_sectors = DEFAULT_MAX_SECTORS,
.transfer_response = ibmvstgt_cmd_done,
.transfer_data = ibmvstgt_transfer_data,
.eh_abort_handler = ibmvstgt_eh_abort_handler,
.tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
.shost_attrs = ibmvstgt_attrs,
.proc_name = TGT_NAME,
};
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
struct Scsi_Host *shost;
struct srp_target *target;
struct vio_port *vport;
unsigned int *dma, dma_size;
int err = -ENOMEM;
vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
if (!vport)
return err;
shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
if (!shost)
goto free_vport;
err = scsi_tgt_alloc_queue(shost);
if (err)
goto put_host;
target = host_to_srp_target(shost);
target->shost = shost;
vport->dma_dev = dev;
target->ldata = vport;
err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
SRP_MAX_IU_LEN);
if (err)
goto put_host;
dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
&dma_size);
if (!dma || dma_size != 40) {
eprintk("Couldn't get window property %d\n", dma_size);
err = -EIO;
goto free_srp_target;
}
vport->liobn = dma[0];
vport->riobn = dma[5];
INIT_WORK(&vport->crq_work, handle_crq, target);
err = crq_queue_create(&vport->crq_queue, target);
if (err)
goto free_srp_target;
err = scsi_add_host(shost, target->dev);
if (err)
goto destroy_queue;
return 0;
destroy_queue:
crq_queue_destroy(target);
free_srp_target:
srp_target_free(target);
put_host:
scsi_host_put(shost);
free_vport:
kfree(vport);
return err;
}
static int ibmvstgt_remove(struct vio_dev *dev)
{
struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
struct Scsi_Host *shost = target->shost;
struct vio_port *vport = target->ldata;
crq_queue_destroy(target);
scsi_remove_host(shost);
scsi_tgt_free_queue(shost);
srp_target_free(target);
kfree(vport);
scsi_host_put(shost);
return 0;
}
static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
{"v-scsi-host", "IBM,v-scsi-host"},
{"",""}
};
MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
static struct vio_driver ibmvstgt_driver = {
.id_table = ibmvstgt_device_table,
.probe = ibmvstgt_probe,
.remove = ibmvstgt_remove,
.driver = {
.name = "ibmvscsis",
.owner = THIS_MODULE,
}
};
static int get_system_info(void)
{
struct device_node *rootdn;
const char *id, *model, *name;
unsigned int *num;
rootdn = find_path_device("/");
if (!rootdn)
return -ENOENT;
model = get_property(rootdn, "model", NULL);
id = get_property(rootdn, "system-id", NULL);
if (model && id)
snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
name = get_property(rootdn, "ibm,partition-name", NULL);
if (name)
strncpy(partition_name, name, sizeof(partition_name));
num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
if (num)
partition_number = *num;
return 0;
}
static int ibmvstgt_init(void)
{
int err = -ENOMEM;
printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
vtgtd = create_workqueue("ibmvtgtd");
if (!vtgtd)
return err;
err = get_system_info();
if (err)
goto destroy_wq;
err = vio_register_driver(&ibmvstgt_driver);
if (err)
goto destroy_wq;
return 0;
destroy_wq:
destroy_workqueue(vtgtd);
return err;
}
static void ibmvstgt_exit(void)
{
printk("Unregister IBM virtual SCSI driver\n");
destroy_workqueue(vtgtd);
vio_unregister_driver(&ibmvstgt_driver);
}
MODULE_DESCRIPTION("IBM Virtual SCSI Target");
MODULE_AUTHOR("Santiago Leon");
MODULE_LICENSE("GPL");
module_init(ibmvstgt_init);
module_exit(ibmvstgt_exit);


@ -170,7 +170,7 @@ static int setup_debug = 0;
static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);
/* PCI Devices supported by this driver */
static struct pci_device_id i91u_pci_devices[] __devinitdata = {
static struct pci_device_id i91u_pci_devices[] = {
{ PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},


@ -79,7 +79,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport.h>
#include "ipr.h"
/*
@ -98,7 +97,7 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
{ /* Gemstone, Citrine, and Obsidian */
{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
.mailbox = 0x0042C,
.cache_line_size = 0x20,
{
@ -135,6 +134,7 @@ static const struct ipr_chip_t ipr_chip[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
@ -1249,19 +1249,23 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
/**
* ipr_log_hex_data - Log additional hex IOA error data.
* @ioa_cfg: ioa config struct
* @data: IOA error data
* @len: data length
*
* Return value:
* none
**/
static void ipr_log_hex_data(u32 *data, int len)
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
int i;
if (len == 0)
return;
if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
for (i = 0; i < len / 4; i += 4) {
ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
be32_to_cpu(data[i]),
@ -1290,7 +1294,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("%s\n", error->failure_reason);
ipr_err("Remote Adapter VPD:\n");
ipr_log_ext_vpd(&error->vpd);
ipr_log_hex_data(error->data,
ipr_log_hex_data(ioa_cfg, error->data,
be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
offsetof(struct ipr_hostrcb_type_17_error, data)));
@ -1315,12 +1319,225 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("%s\n", error->failure_reason);
ipr_err("Remote Adapter VPD:\n");
ipr_log_vpd(&error->vpd);
ipr_log_hex_data(error->data,
ipr_log_hex_data(ioa_cfg, error->data,
be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
u8 active;
char *desc;
} path_active_desc[] = {
{ IPR_PATH_NO_INFO, "Path" },
{ IPR_PATH_ACTIVE, "Active path" },
{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};
static const struct {
u8 state;
char *desc;
} path_state_desc[] = {
{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
{ IPR_PATH_HEALTHY, "is healthy" },
{ IPR_PATH_DEGRADED, "is degraded" },
{ IPR_PATH_FAILED, "is failed" }
};
/**
* ipr_log_fabric_path - Log a fabric path error
* @hostrcb: hostrcb struct
* @fabric: fabric descriptor
*
* Return value:
* none
**/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
struct ipr_hostrcb_fabric_desc *fabric)
{
int i, j;
u8 path_state = fabric->path_state;
u8 active = path_state & IPR_PATH_ACTIVE_MASK;
u8 state = path_state & IPR_PATH_STATE_MASK;
for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
if (path_active_desc[i].active != active)
continue;
for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
if (path_state_desc[j].state != state)
continue;
if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port);
} else if (fabric->cascaded_expander == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port, fabric->phy);
} else if (fabric->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port, fabric->cascaded_expander);
} else {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
return;
}
}
ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
static const struct {
u8 type;
char *desc;
} path_type_desc[] = {
{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};
static const struct {
u8 status;
char *desc;
} path_status_desc[] = {
{ IPR_PATH_CFG_NO_PROB, "Functional" },
{ IPR_PATH_CFG_DEGRADED, "Degraded" },
{ IPR_PATH_CFG_FAILED, "Failed" },
{ IPR_PATH_CFG_SUSPECT, "Suspect" },
{ IPR_PATH_NOT_DETECTED, "Missing" },
{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};
static const char *link_rate[] = {
"unknown",
"disabled",
"phy reset problem",
"spinup hold",
"port selector",
"unknown",
"unknown",
"unknown",
"1.5Gbps",
"3.0Gbps",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown"
};
/**
* ipr_log_path_elem - Log a fabric path element.
* @hostrcb: hostrcb struct
* @cfg: fabric path element struct
*
* Return value:
* none
**/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
struct ipr_hostrcb_config_element *cfg)
{
int i, j;
u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
if (type == IPR_PATH_CFG_NOT_EXIST)
return;
for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
if (path_type_desc[i].type != type)
continue;
for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
if (path_status_desc[j].status != status)
continue;
if (type == IPR_PATH_CFG_IOA_PORT) {
ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else {
if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else if (cfg->cascaded_expander == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
"WWN=%08X%08X\n", path_status_desc[j].desc,
path_type_desc[i].desc, cfg->phy,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else if (cfg->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
"WWN=%08X%08X\n", path_status_desc[j].desc,
path_type_desc[i].desc, cfg->cascaded_expander,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else {
ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
"WWN=%08X%08X\n", path_status_desc[j].desc,
path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
}
return;
}
}
ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
"WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
* ipr_log_fabric_error - Log a fabric error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
*
* Return value:
* none
**/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
struct ipr_hostrcb_type_20_error *error;
struct ipr_hostrcb_fabric_desc *fabric;
struct ipr_hostrcb_config_element *cfg;
int i, add_len;
error = &hostrcb->hcam.u.error.u.type_20_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
add_len = be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
offsetof(struct ipr_hostrcb_type_20_error, desc));
for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
ipr_log_fabric_path(hostrcb, fabric);
for_each_fabric_cfg(fabric, cfg)
ipr_log_path_elem(hostrcb, cfg);
add_len -= be16_to_cpu(fabric->length);
fabric = (struct ipr_hostrcb_fabric_desc *)
((unsigned long)fabric + be16_to_cpu(fabric->length));
}
ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
* ipr_log_generic_error - Log an adapter error.
* @ioa_cfg: ioa config struct
@ -1332,7 +1549,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
ipr_log_hex_data(hostrcb->hcam.u.raw.data,
ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
be32_to_cpu(hostrcb->hcam.length));
}
@ -1394,13 +1611,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
if (!ipr_error_table[error_index].log_hcam)
return;
if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
"%s\n", ipr_error_table[error_index].error);
} else {
dev_err(&ioa_cfg->pdev->dev, "%s\n",
ipr_error_table[error_index].error);
}
ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
/* Set indication we have logged an error */
ioa_cfg->errors_logged++;
@ -1437,6 +1648,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_17:
ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_20:
ipr_log_fabric_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_1:
case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
default:
@ -2969,7 +3183,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
struct ipr_dump *dump;
unsigned long lock_flags = 0;
ENTER;
dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
if (!dump) {
@ -2996,7 +3209,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
LEAVE;
return 0;
}
@ -3573,6 +3785,12 @@ static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
res = sata_port->res;
if (res) {
rc = ipr_device_reset(ioa_cfg, res);
@ -3636,6 +3854,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
if (ipr_cmd->scsi_cmd)
ipr_cmd->done = ipr_scsi_eh_done;
if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
}
}
}
@ -3770,7 +3992,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
*/
if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
return FAILED;
if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
if (!res || !ipr_is_gscsi(res))
return FAILED;
list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@ -4615,7 +4837,7 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
* Return value:
* 0 on success / other on failure
**/
int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct ipr_resource_entry *res;
@ -4648,40 +4870,6 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
return buffer;
}
/**
* ipr_scsi_timed_out - Handle scsi command timeout
* @scsi_cmd: scsi command struct
*
* Return value:
* EH_NOT_HANDLED
**/
enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_cmnd *ipr_cmd;
unsigned long flags;
ENTER;
spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
break;
}
}
spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
LEAVE;
return EH_NOT_HANDLED;
}
static struct scsi_transport_template ipr_transport_template = {
.eh_timed_out = ipr_scsi_timed_out
};
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IPR",
@ -4776,6 +4964,12 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
if (ipr_cmd->qc == qc) {
ipr_device_reset(ioa_cfg, sata_port->res);
@ -6832,6 +7026,7 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->hostrcb[i]->hostrcb_dma =
ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
}
@ -7017,7 +7212,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
host->transportt = &ipr_transport_template;
ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
sata_port_info.flags, &ipr_sata_ops);
@ -7351,12 +7545,24 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
@ -7366,6 +7572,9 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

View File

@ -37,8 +37,8 @@
/*
* Literals
*/
#define IPR_DRIVER_VERSION "2.2.0"
#define IPR_DRIVER_DATE "(September 25, 2006)"
#define IPR_DRIVER_VERSION "2.3.0"
#define IPR_DRIVER_DATE "(November 8, 2006)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@ -54,6 +54,8 @@
*/
#define IPR_NUM_BASE_CMD_BLKS 100
#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
#define IPR_SUBS_DEV_ID_2780 0x0264
#define IPR_SUBS_DEV_ID_5702 0x0266
#define IPR_SUBS_DEV_ID_5703 0x0278
@ -66,7 +68,11 @@
#define IPR_SUBS_DEV_ID_571F 0x02D5
#define IPR_SUBS_DEV_ID_572A 0x02C1
#define IPR_SUBS_DEV_ID_572B 0x02C2
#define IPR_SUBS_DEV_ID_572F 0x02C3
#define IPR_SUBS_DEV_ID_575B 0x030D
#define IPR_SUBS_DEV_ID_575C 0x0338
#define IPR_SUBS_DEV_ID_57B7 0x0360
#define IPR_SUBS_DEV_ID_57B8 0x02C2
#define IPR_NAME "ipr"
@ -98,6 +104,7 @@
#define IPR_IOASC_IOA_WAS_RESET 0x10000001
#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002
#define IPR_DEFAULT_MAX_ERROR_DUMP 984
#define IPR_NUM_LOG_HCAMS 2
#define IPR_NUM_CFG_CHG_HCAMS 2
#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
@ -731,6 +738,64 @@ struct ipr_hostrcb_type_17_error {
u32 data[476];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_config_element {
u8 type_status;
#define IPR_PATH_CFG_TYPE_MASK 0xF0
#define IPR_PATH_CFG_NOT_EXIST 0x00
#define IPR_PATH_CFG_IOA_PORT 0x10
#define IPR_PATH_CFG_EXP_PORT 0x20
#define IPR_PATH_CFG_DEVICE_PORT 0x30
#define IPR_PATH_CFG_DEVICE_LUN 0x40
#define IPR_PATH_CFG_STATUS_MASK 0x0F
#define IPR_PATH_CFG_NO_PROB 0x00
#define IPR_PATH_CFG_DEGRADED 0x01
#define IPR_PATH_CFG_FAILED 0x02
#define IPR_PATH_CFG_SUSPECT 0x03
#define IPR_PATH_NOT_DETECTED 0x04
#define IPR_PATH_INCORRECT_CONN 0x05
u8 cascaded_expander;
u8 phy;
u8 link_rate;
#define IPR_PHY_LINK_RATE_MASK 0x0F
__be32 wwid[2];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_fabric_desc {
__be16 length;
u8 ioa_port;
u8 cascaded_expander;
u8 phy;
u8 path_state;
#define IPR_PATH_ACTIVE_MASK 0xC0
#define IPR_PATH_NO_INFO 0x00
#define IPR_PATH_ACTIVE 0x40
#define IPR_PATH_NOT_ACTIVE 0x80
#define IPR_PATH_STATE_MASK 0x0F
#define IPR_PATH_STATE_NO_INFO 0x00
#define IPR_PATH_HEALTHY 0x01
#define IPR_PATH_DEGRADED 0x02
#define IPR_PATH_FAILED 0x03
__be16 num_entries;
struct ipr_hostrcb_config_element elem[1];
}__attribute__((packed, aligned (4)));
#define for_each_fabric_cfg(fabric, cfg) \
for (cfg = (fabric)->elem; \
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
cfg++)
struct ipr_hostrcb_type_20_error {
u8 failure_reason[64];
u8 reserved[3];
u8 num_entries;
struct ipr_hostrcb_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
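/*
 * A minimal sketch (not part of this patch) of walking a type 20 error
 * record with for_each_fabric_cfg(). ipr_log_path_elem() is a
 * hypothetical per-element logger; fabric descriptors are assumed to be
 * packed back to back, each be16_to_cpu(fabric->length) bytes long.
 */
static void ipr_walk_type_20_error(struct ipr_hostrcb_type_20_error *error)
{
	struct ipr_hostrcb_fabric_desc *fabric = error->desc;
	struct ipr_hostrcb_config_element *cfg;
	int i;
	for (i = 0; i < error->num_entries; i++) {
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(cfg);		/* hypothetical helper */
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}
}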
struct ipr_hostrcb_error {
__be32 failing_dev_ioasc;
struct ipr_res_addr failing_dev_res_addr;
@ -747,6 +812,7 @@ struct ipr_hostrcb_error {
struct ipr_hostrcb_type_13_error type_13_error;
struct ipr_hostrcb_type_14_error type_14_error;
struct ipr_hostrcb_type_17_error type_17_error;
struct ipr_hostrcb_type_20_error type_20_error;
} u;
}__attribute__((packed, aligned (4)));
@ -786,6 +852,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_14 0x14
#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
u8 reserved1[3];
@ -805,6 +872,7 @@ struct ipr_hostrcb {
struct ipr_hcam hcam;
dma_addr_t hostrcb_dma;
struct list_head queue;
struct ipr_ioa_cfg *ioa_cfg;
};
/* IPR smart dump table structures */
@ -1283,6 +1351,17 @@ struct ipr_ucode_image_header {
} \
}
#define ipr_hcam_err(hostrcb, fmt, ...) \
{ \
if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \
ipr_ra_err((hostrcb)->ioa_cfg, \
(hostrcb)->hcam.u.error.failing_dev_res_addr, \
fmt, ##__VA_ARGS__); \
} else { \
dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
} \
}
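/*
 * Hypothetical call site for the macro above: when the hostrcb's
 * failing resource address names a real device, the message is routed
 * through ipr_ra_err(); otherwise it falls back to a plain dev_err():
 *
 *	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
 */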
#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
__FILE__, __FUNCTION__, __LINE__)

View File

@ -5001,7 +5001,7 @@ ips_init_copperhead(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (j >= 45)
@ -5027,7 +5027,7 @@ ips_init_copperhead(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (j >= 240)
@ -5045,7 +5045,7 @@ ips_init_copperhead(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (i >= 240)
@ -5095,7 +5095,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (j >= 45)
@ -5121,7 +5121,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (j >= 240)
@ -5139,7 +5139,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (i >= 240)
@ -5191,7 +5191,7 @@ ips_init_morpheus(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (i >= 45) {
@ -5217,7 +5217,7 @@ ips_init_morpheus(ips_ha_t * ha)
if (Post != 0x4F00)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (i >= 120) {
@ -5247,7 +5247,7 @@ ips_init_morpheus(ips_ha_t * ha)
break;
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
}
if (i >= 240) {
@ -5307,12 +5307,12 @@ ips_reset_copperhead(ips_ha_t * ha)
outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
outb(0, ha->io_addr + IPS_REG_SCPR);
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
if ((*ha->func.init) (ha))
break;
@ -5352,12 +5352,12 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
writeb(0, ha->mem_ptr + IPS_REG_SCPR);
/* Delay for 1 Second */
msleep(IPS_ONE_SEC);
MDELAY(IPS_ONE_SEC);
if ((*ha->func.init) (ha))
break;
@ -5398,7 +5398,7 @@ ips_reset_morpheus(ips_ha_t * ha)
writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
/* Delay for 5 Seconds */
msleep(5 * IPS_ONE_SEC);
MDELAY(5 * IPS_ONE_SEC);
/* Do a PCI config read to wait for adapter */
pci_read_config_byte(ha->pcidev, 4, &junk);

View File

@ -51,6 +51,7 @@
#define _IPS_H_
#include <linux/version.h>
#include <linux/nmi.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@ -116,9 +117,11 @@
dev_printk(level , &((pcidev)->dev) , format , ## arg)
#endif
#ifndef MDELAY
#define MDELAY mdelay
#endif
#define MDELAY(n) \
do { \
mdelay(n); \
touch_nmi_watchdog(); \
} while (0)
#ifndef min
#define min(x,y) ((x) < (y) ? x : y)

View File

@ -597,10 +597,15 @@ static struct domain_device *sas_ex_discover_end_dev(
child->iproto = phy->attached_iproto;
memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
if (!phy->port) {
phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
BUG_ON(!phy->port);
/* FIXME: better error handling*/
BUG_ON(sas_port_add(phy->port) != 0);
if (unlikely(!phy->port))
goto out_err;
if (unlikely(sas_port_add(phy->port) != 0)) {
sas_port_free(phy->port);
goto out_err;
}
}
sas_ex_get_linkrate(parent, child, phy);
if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
@ -615,8 +620,7 @@ static struct domain_device *sas_ex_discover_end_dev(
SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
"0x%x\n", SAS_ADDR(parent->sas_addr),
phy_id, res);
kfree(child);
return NULL;
goto out_free;
}
memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis));
@ -627,14 +631,14 @@ static struct domain_device *sas_ex_discover_end_dev(
"%016llx:0x%x returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
kfree(child);
return NULL;
goto out_free;
}
} else if (phy->attached_tproto & SAS_PROTO_SSP) {
child->dev_type = SAS_END_DEV;
rphy = sas_end_device_alloc(phy->port);
/* FIXME: error handling */
BUG_ON(!rphy);
if (unlikely(!rphy))
goto out_free;
child->tproto = phy->attached_tproto;
sas_init_dev(child);
@ -651,9 +655,7 @@ static struct domain_device *sas_ex_discover_end_dev(
"at %016llx:0x%x returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
/* FIXME: this kfrees list elements without removing them */
//kfree(child);
return NULL;
goto out_list_del;
}
} else {
SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
@ -663,6 +665,16 @@ static struct domain_device *sas_ex_discover_end_dev(
list_add_tail(&child->siblings, &parent_ex->children);
return child;
out_list_del:
list_del(&child->dev_list_node);
sas_rphy_free(rphy);
out_free:
sas_port_delete(phy->port);
out_err:
phy->port = NULL;
kfree(child);
return NULL;
}
static struct domain_device *sas_ex_discover_expander(

View File

@ -112,6 +112,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
}
}
INIT_LIST_HEAD(&sas_ha->eh_done_q);
return 0;
Undo_ports:
@ -142,7 +144,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
return sas_smp_get_phy_events(phy);
}
static int sas_phy_reset(struct sas_phy *phy, int hard_reset)
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
int ret;
enum phy_func reset_type;

View File

@ -29,9 +29,11 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include <linux/err.h>
#include <linux/blkdev.h>
@ -46,6 +48,7 @@ static void sas_scsi_task_done(struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
struct scsi_cmnd *sc = task->uldd_task;
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
unsigned ts_flags = task->task_state_flags;
int hs = 0, stat = 0;
@ -116,7 +119,7 @@ static void sas_scsi_task_done(struct sas_task *task)
sas_free_task(task);
/* This is very ugly but this is how SCSI Core works. */
if (ts_flags & SAS_TASK_STATE_ABORTED)
scsi_finish_command(sc);
scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
else
sc->scsi_done(sc);
}
@ -307,6 +310,15 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
spin_unlock_irqrestore(&core->task_queue_lock, flags);
}
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: task 0x%p already aborted\n",
__FUNCTION__, task);
return TASK_IS_ABORTED;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
for (i = 0; i < 5; i++) {
SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
res = si->dft->lldd_abort_task(task);
@ -409,13 +421,16 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
SAS_DPRINTK("going over list...\n");
list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd);
SAS_DPRINTK("trying to find task 0x%p\n", task);
list_del_init(&cmd->eh_entry);
if (!task) {
SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__);
continue;
}
SAS_DPRINTK("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task);
cmd->eh_eflags = 0;
shost->host_failed--;
switch (res) {
case TASK_IS_DONE:
@ -491,6 +506,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
}
}
out:
scsi_eh_flush_done_q(&ha->eh_done_q);
SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
return;
clear_q:
@ -508,12 +524,18 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
unsigned long flags;
if (!task) {
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n",
cmd, task);
return EH_HANDLED;
}
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
"EH_NOT_HANDLED\n", cmd, task);
return EH_NOT_HANDLED;
}
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
@ -777,6 +799,64 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
spin_unlock_irqrestore(&core->task_queue_lock, flags);
}
static int do_sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
struct sas_internal *si =
to_sas_internal(task->dev->port->ha->core.shost->transportt);
unsigned long flags;
int res;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__,
task);
return 0;
}
task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (!si->dft->lldd_abort_task)
return -ENODEV;
res = si->dft->lldd_abort_task(task);
if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
(res == TMF_RESP_FUNC_COMPLETE))
{
/* SMP commands don't have scsi_cmds(?) */
if (!sc) {
task->task_done(task);
return 0;
}
scsi_req_abort_cmd(sc);
scsi_schedule_eh(sc->device->host);
return 0;
}
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
return -EAGAIN;
}
void sas_task_abort(struct sas_task *task)
{
int i;
for (i = 0; i < 5; i++)
if (!do_sas_task_abort(task))
return;
SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__);
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
@ -784,3 +864,5 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);

441
drivers/scsi/libsrp.c Normal file
View File

@ -0,0 +1,441 @@
/*
* SCSI RDMA Protocol lib functions
*
* Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_tgt.h>
#include <scsi/srp.h>
#include <scsi/libsrp.h>
enum srp_task_attributes {
SRP_SIMPLE_TASK = 0,
SRP_HEAD_TASK = 1,
SRP_ORDERED_TASK = 2,
SRP_ACA_TASK = 4
};
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
struct srp_buf **ring)
{
int i;
struct iu_entry *iue;
q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
if (!q->pool)
return -ENOMEM;
q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
if (!q->items)
goto free_pool;
spin_lock_init(&q->lock);
q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
GFP_KERNEL, &q->lock);
if (IS_ERR(q->queue))
goto free_item;
for (i = 0, iue = q->items; i < max; i++) {
__kfifo_put(q->queue, (void *) &iue, sizeof(void *));
iue->sbuf = ring[i];
iue++;
}
return 0;
free_item:
kfree(q->items);
free_pool:
kfree(q->pool);
return -ENOMEM;
}
static void srp_iu_pool_free(struct srp_queue *q)
{
kfree(q->items);
kfree(q->pool);
}
static struct srp_buf **srp_ring_alloc(struct device *dev,
size_t max, size_t size)
{
int i;
struct srp_buf **ring;
ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
if (!ring)
return NULL;
for (i = 0; i < max; i++) {
ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
if (!ring[i])
goto out;
ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
GFP_KERNEL);
if (!ring[i]->buf)
goto out;
}
return ring;
out:
for (i = 0; i < max && ring[i]; i++) {
if (ring[i]->buf)
dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
kfree(ring[i]);
}
kfree(ring);
return NULL;
}
static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
size_t size)
{
int i;
for (i = 0; i < max; i++) {
dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
kfree(ring[i]);
}
}
int srp_target_alloc(struct srp_target *target, struct device *dev,
size_t nr, size_t iu_size)
{
int err;
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->cmd_queue);
target->dev = dev;
target->dev->driver_data = target;
target->srp_iu_size = iu_size;
target->rx_ring_size = nr;
target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
if (!target->rx_ring)
return -ENOMEM;
err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
if (err)
goto free_ring;
return 0;
free_ring:
srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(srp_target_alloc);
void srp_target_free(struct srp_target *target)
{
srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
target->srp_iu_size);
srp_iu_pool_free(&target->iu_queue);
}
EXPORT_SYMBOL_GPL(srp_target_free);
struct iu_entry *srp_iu_get(struct srp_target *target)
{
struct iu_entry *iue = NULL;
kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
if (!iue)
return iue;
iue->target = target;
INIT_LIST_HEAD(&iue->ilist);
iue->flags = 0;
return iue;
}
EXPORT_SYMBOL_GPL(srp_iu_get);
void srp_iu_put(struct iu_entry *iue)
{
kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
}
EXPORT_SYMBOL_GPL(srp_iu_put);
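/*
 * Sketch of the intended IU lifecycle (transport glue assumed, names
 * invented): a receive path borrows an entry from the pool, queues the
 * command, and returns the entry when the command completes.
 */
static void example_recv(struct srp_target *target, struct srp_buf *rx)
{
	struct iu_entry *iue;
	iue = srp_iu_get(target);	/* take a free entry, or NULL */
	if (!iue)
		return;			/* pool exhausted: drop or retry */
	/* ... decode rx->buf as a struct srp_cmd and queue it ... */
	srp_iu_put(iue);	/* in a real driver, at command completion */
}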
static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
enum dma_data_direction dir, srp_rdma_t rdma_io,
int dma_map, int ext_desc)
{
struct iu_entry *iue = NULL;
struct scatterlist *sg = NULL;
int err, nsg = 0, len;
if (dma_map) {
iue = (struct iu_entry *) sc->SCp.ptr;
sg = sc->request_buffer;
dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
md->len, sc->use_sg);
nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
DMA_BIDIRECTIONAL);
if (!nsg) {
printk("fail to map %p %d\n", iue, sc->use_sg);
return 0;
}
len = min(sc->request_bufflen, md->len);
} else
len = md->len;
err = rdma_io(sc, sg, nsg, md, 1, dir, len);
if (dma_map)
dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
return err;
}
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
struct srp_indirect_buf *id,
enum dma_data_direction dir, srp_rdma_t rdma_io,
int dma_map, int ext_desc)
{
struct iu_entry *iue = NULL;
struct srp_direct_buf *md = NULL;
struct scatterlist dummy, *sg = NULL;
dma_addr_t token = 0;
long err;
unsigned int done = 0;
int nmd, nsg = 0, len;
if (dma_map || ext_desc) {
iue = (struct iu_entry *) sc->SCp.ptr;
sg = sc->request_buffer;
dprintk("%p %u %u %d %d\n",
iue, sc->request_bufflen, id->len,
cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
}
nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
(dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
md = &id->desc_list[0];
goto rdma;
}
if (ext_desc && dma_map) {
md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
&token, GFP_KERNEL);
if (!md) {
eprintk("Can't get dma memory %u\n", id->table_desc.len);
return -ENOMEM;
}
sg_init_one(&dummy, md, id->table_desc.len);
sg_dma_address(&dummy) = token;
err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
id->table_desc.len);
if (err < 0) {
eprintk("Error copying indirect table %ld\n", err);
goto free_mem;
}
} else {
eprintk("This command uses external indirect buffer\n");
return -EINVAL;
}
rdma:
if (dma_map) {
nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
if (!nsg) {
eprintk("fail to map %p %d\n", iue, sc->use_sg);
goto free_mem;
}
len = min(sc->request_bufflen, id->len);
} else
len = id->len;
err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
if (dma_map)
dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
free_mem:
if (token && dma_map)
dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
return done;
}
static int data_out_desc_size(struct srp_cmd *cmd)
{
int size = 0;
u8 fmt = cmd->buf_fmt >> 4;
switch (fmt) {
case SRP_NO_DATA_DESC:
break;
case SRP_DATA_DESC_DIRECT:
size = sizeof(struct srp_direct_buf);
break;
case SRP_DATA_DESC_INDIRECT:
size = sizeof(struct srp_indirect_buf) +
sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
break;
default:
eprintk("client error. Invalid data_out_format %x\n", fmt);
break;
}
return size;
}
/*
* TODO: this can be called multiple times for a single command if it
* has very long data.
*/
int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
struct srp_direct_buf *md;
struct srp_indirect_buf *id;
enum dma_data_direction dir;
int offset, err = 0;
u8 format;
offset = cmd->add_cdb_len * 4;
dir = srp_cmd_direction(cmd);
if (dir == DMA_FROM_DEVICE)
offset += data_out_desc_size(cmd);
if (dir == DMA_TO_DEVICE)
format = cmd->buf_fmt >> 4;
else
format = cmd->buf_fmt & ((1U << 4) - 1);
switch (format) {
case SRP_NO_DATA_DESC:
break;
case SRP_DATA_DESC_DIRECT:
md = (struct srp_direct_buf *)
(cmd->add_data + offset);
err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
break;
case SRP_DATA_DESC_INDIRECT:
id = (struct srp_indirect_buf *)
(cmd->add_data + offset);
err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
ext_desc);
break;
default:
eprintk("Unknown format %d %x\n", dir, format);
break;
}
return err;
}
EXPORT_SYMBOL_GPL(srp_transfer_data);
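/*
 * Sketch of an LLD-side caller (all names hypothetical): the transport
 * provides an rdma_io callback with the signature libsrp invokes above
 * and lets srp_transfer_data() resolve direct vs. indirect descriptors
 * and handle the scatterlist mapping.
 */
static int my_rdma_io(struct scsi_cmnd *sc, struct scatterlist *sg,
		      int nsg, struct srp_direct_buf *md, int nmd,
		      enum dma_data_direction dir, unsigned int len)
{
	/* post RDMA read/write work requests for the (sg, md) pairs */
	return len;
}
static int my_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd)
{
	/* dma_map=1, ext_desc=1: map the scatterlist and fetch external
	 * indirect tables when the initiator supplies them */
	return srp_transfer_data(sc, cmd, my_rdma_io, 1, 1);
}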
static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
struct srp_direct_buf *md;
struct srp_indirect_buf *id;
int len = 0, offset = cmd->add_cdb_len * 4;
u8 fmt;
if (dir == DMA_TO_DEVICE)
fmt = cmd->buf_fmt >> 4;
else {
fmt = cmd->buf_fmt & ((1U << 4) - 1);
offset += data_out_desc_size(cmd);
}
switch (fmt) {
case SRP_NO_DATA_DESC:
break;
case SRP_DATA_DESC_DIRECT:
md = (struct srp_direct_buf *) (cmd->add_data + offset);
len = md->len;
break;
case SRP_DATA_DESC_INDIRECT:
id = (struct srp_indirect_buf *) (cmd->add_data + offset);
len = id->len;
break;
default:
eprintk("invalid data format %x\n", fmt);
break;
}
return len;
}
int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
u64 addr)
{
enum dma_data_direction dir;
struct scsi_cmnd *sc;
int tag, len, err;
switch (cmd->task_attr) {
case SRP_SIMPLE_TASK:
tag = MSG_SIMPLE_TAG;
break;
case SRP_ORDERED_TASK:
tag = MSG_ORDERED_TAG;
break;
case SRP_HEAD_TASK:
tag = MSG_HEAD_TAG;
break;
default:
eprintk("Task attribute %d not supported\n", cmd->task_attr);
tag = MSG_ORDERED_TAG;
}
dir = srp_cmd_direction(cmd);
len = vscsis_data_length(cmd, dir);
dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);
sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
if (!sc)
return -ENOMEM;
sc->SCp.ptr = info;
memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
sc->request_bufflen = len;
sc->request_buffer = (void *) (unsigned long) addr;
sc->tag = tag;
err = scsi_tgt_queue_command(sc, (struct scsi_lun *) &cmd->lun, cmd->tag);
if (err)
scsi_host_put_command(shost, sc);
return err;
}
EXPORT_SYMBOL_GPL(srp_cmd_queue);
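/*
 * Hypothetical receive path tying the pieces together: a decoded
 * SRP_CMD IU is handed to the SCSI target core. The iu_entry travels
 * as the opaque 'info' cookie (stored in sc->SCp.ptr above) and 'addr'
 * ends up in sc->request_buffer for the later transfer step.
 */
static int example_queue_cmd(struct Scsi_Host *shost, struct iu_entry *iue,
			     u64 remote_addr)
{
	struct srp_cmd *cmd = iue->sbuf->buf;
	return srp_cmd_queue(shost, cmd, iue, remote_addr);
}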
MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");

View File

@ -296,13 +296,17 @@ struct lpfc_hba {
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
uint32_t cfg_multi_ring_rctl;
uint32_t cfg_multi_ring_type;
uint32_t cfg_fdmi_on;
uint32_t cfg_discovery_threads;
uint32_t cfg_max_luns;
uint32_t cfg_poll;
uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
uint32_t dev_loss_tmo_changed;
@ -355,7 +359,7 @@ struct lpfc_hba {
#define VPD_PORT 0x8 /* valid vpd port data */
#define VPD_MASK 0xf /* mask for any vpd data */
uint8_t soft_wwpn_enable;
uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
struct timer_list els_tmofunc;

View File

@ -552,10 +552,10 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
static char *lpfc_soft_wwpn_key = "C99G71SL8032A";
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
static ssize_t
lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
size_t count)
{
struct Scsi_Host *host = class_to_shost(cdev);
@ -579,15 +579,15 @@ lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
if (buf[cnt-1] == '\n')
cnt--;
if ((cnt != strlen(lpfc_soft_wwpn_key)) ||
(strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0))
if ((cnt != strlen(lpfc_soft_wwn_key)) ||
(strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
return -EINVAL;
phba->soft_wwpn_enable = 1;
phba->soft_wwn_enable = 1;
return count;
}
static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL,
lpfc_soft_wwpn_enable_store);
static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
lpfc_soft_wwn_enable_store);
static ssize_t
lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
@ -613,12 +613,12 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
if (buf[cnt-1] == '\n')
cnt--;
if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) ||
if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
((cnt == 17) && (*buf++ != 'x')) ||
((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
return -EINVAL;
phba->soft_wwpn_enable = 0;
phba->soft_wwn_enable = 0;
memset(wwpn, 0, sizeof(wwpn));
@ -639,6 +639,8 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
}
phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
fc_host_port_name(host) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
fc_host_node_name(host) = phba->cfg_soft_wwnn;
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@ -664,6 +666,66 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
static ssize_t
lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwnn);
}
static ssize_t
lpfc_soft_wwnn_store(struct class_device *cdev, const char *buf, size_t count)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
unsigned int i, j, cnt=count;
u8 wwnn[8];
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
((cnt == 17) && (*buf++ != 'x')) ||
((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
return -EINVAL;
/*
* Allow wwnn to be set many times, as long as the enable is set.
* However, once the wwpn is set, everything locks.
*/
memset(wwnn, 0, sizeof(wwnn));
/* Validate and store the new name */
for (i=0, j=0; i < 16; i++) {
if ((*buf >= 'a') && (*buf <= 'f'))
j = ((j << 4) | ((*buf++ -'a') + 10));
else if ((*buf >= 'A') && (*buf <= 'F'))
j = ((j << 4) | ((*buf++ -'A') + 10));
else if ((*buf >= '0') && (*buf <= '9'))
j = ((j << 4) | (*buf++ -'0'));
else
return -EINVAL;
if (i % 2) {
wwnn[i/2] = j & 0xff;
j = 0;
}
}
phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: soft_wwnn set. Value will take effect upon "
"setting of the soft_wwpn\n", phba->brd_no);
return count;
}
static CLASS_DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
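/*
 * Hypothetical user-space sequence (host number and WWN values are
 * invented). Per the code above, the wwnn may be rewritten as long as
 * the enable is set, and it only takes effect once a soft_wwpn is
 * stored:
 *
 *   echo C99G71SL8032A    > /sys/class/scsi_host/host0/lpfc_soft_wwn_enable
 *   echo 20000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwnn
 *   echo 10000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwpn
 */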
static int lpfc_poll = 0;
module_param(lpfc_poll, int, 0);
@ -802,12 +864,11 @@ static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
# LOG_MBOX 0x4 Mailbox events
# LOG_INIT 0x8 Initialization events
# LOG_LINK_EVENT 0x10 Link events
# LOG_IP 0x20 IP traffic history
# LOG_FCP 0x40 FCP traffic history
# LOG_NODE 0x80 Node table events
# LOG_MISC 0x400 Miscellaneous events
# LOG_SLI 0x800 SLI events
# LOG_CHK_COND 0x1000 FCP Check condition flag
# LOG_FCP_ERROR 0x1000 Only log FCP errors
# LOG_LIBDFC 0x2000 LIBDFC events
# LOG_ALL_MSG 0xffff LOG all messages
*/
@ -915,6 +976,22 @@ LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
"SLI rings to spread IOCB entries across");
/*
# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
# identifies what rctl value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
*/
LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
255, "Identifies RCTL for additional ring configuration");
/*
# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
# identifies what type value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
*/
LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
255, "Identifies TYPE for additional ring configuration");
/*
# lpfc_fdmi_on: controls FDMI support.
# 0 = no FDMI support
@ -946,6 +1023,15 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
"Milliseconds driver will wait between polling FCP ring");
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled (default)
# 1 = MSI enabled
# Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
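/*
# Hypothetical usage: request MSI at load time, then confirm the value
# through the read-only host attribute registered below:
#   modprobe lpfc lpfc_use_msi=1
#   cat /sys/class/scsi_host/host0/lpfc_use_msi
*/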
struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_info,
@ -974,6 +1060,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_cr_delay,
&class_device_attr_lpfc_cr_count,
&class_device_attr_lpfc_multi_ring_support,
&class_device_attr_lpfc_multi_ring_rctl,
&class_device_attr_lpfc_multi_ring_type,
&class_device_attr_lpfc_fdmi_on,
&class_device_attr_lpfc_max_luns,
&class_device_attr_nport_evt_cnt,
@ -982,8 +1070,10 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_issue_reset,
&class_device_attr_lpfc_poll,
&class_device_attr_lpfc_poll_tmo,
&class_device_attr_lpfc_use_msi,
&class_device_attr_lpfc_soft_wwnn,
&class_device_attr_lpfc_soft_wwpn,
&class_device_attr_lpfc_soft_wwpn_enable,
&class_device_attr_lpfc_soft_wwn_enable,
NULL,
};
@ -1771,6 +1861,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
lpfc_fcp_class_init(phba, lpfc_fcp_class);
lpfc_use_adisc_init(phba, lpfc_use_adisc);
@ -1782,9 +1874,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
lpfc_max_luns_init(phba, lpfc_max_luns);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
/*

View File

@ -558,6 +558,14 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
return;
}
static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
void
lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
{
@ -629,6 +637,8 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
else
bpl->tus.f.bdeSize = 0;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
@ -660,6 +670,17 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
case SLI_CTNS_RFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
CtReq->un.rff.feature_res = 0;
CtReq->un.rff.feature_tgt = 0;
CtReq->un.rff.type_code = FC_FCP_DATA;
CtReq->un.rff.feature_init = 1;
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
case SLI_CTNS_RNN_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RNN_ID);
@ -934,7 +955,8 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
sprintf(ae->un.OsNameVersion, "%s %s %s",
init_utsname()->sysname, init_utsname()->release,
init_utsname()->sysname,
init_utsname()->release,
init_utsname()->version);
len = strlen(ae->un.OsNameVersion);
len += (len & 3) ? (4 - (len & 3)) : 4;

View File

@ -243,6 +243,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, IOCB_t *irsp)
{
LPFC_MBOXQ_t *mbox;
struct lpfc_dmabuf *mp;
int rc;
spin_lock_irq(phba->host->host_lock);
@ -307,10 +308,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED)
goto fail_free_mbox;
goto fail_issue_reg_login;
return 0;
fail_issue_reg_login:
mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
fail_free_mbox:
mempool_free(mbox, phba->mbox_mem_pool);
fail:
@ -657,6 +662,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
uint8_t name[sizeof (struct lpfc_name)];
uint32_t rc;
/* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us.
*/
if (ndlp->nlp_type & NLP_FABRIC)
return ndlp;
lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
memset(name, 0, sizeof (struct lpfc_name));
@ -1122,7 +1133,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
mempool_free(mbox,
phba->mbox_mem_pool);
lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].
psli->ring[(psli->extra_ring)].
flag &=
~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].
@ -1851,6 +1862,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
LPFC_MBOXQ_t *mbox = NULL;
struct lpfc_dmabuf *mp;
irsp = &rspiocb->iocb;
@ -1862,6 +1874,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
/* Check to see if link went down during discovery */
if ((lpfc_els_chk_latt(phba)) || !ndlp) {
if (mbox) {
mp = (struct lpfc_dmabuf *) mbox->context1;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
mempool_free( mbox, phba->mbox_mem_pool);
}
goto out;
@ -1893,9 +1910,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
/* NOTE: we should have messages for unsuccessful
reglogin */
mempool_free( mbox, phba->mbox_mem_pool);
} else {
mempool_free( mbox, phba->mbox_mem_pool);
/* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
@ -1907,6 +1922,12 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
}
}
mp = (struct lpfc_dmabuf *) mbox->context1;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
if (ndlp) {
@ -2644,6 +2665,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
ndlp->nlp_type |= NLP_FABRIC;
ndlp->nlp_prev_state = ndlp->nlp_state;
ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
lpfc_issue_els_plogi(phba, NameServer_DID, 0);
/* Wait for NameServer login cmpl before we can
continue */
@ -3039,7 +3061,7 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
/* FARP-REQ received from DID <did> */
lpfc_printf_log(phba,
KERN_INFO,
LOG_IP,
LOG_ELS,
"%d:0601 FARP-REQ received from DID x%x\n",
phba->brd_no, did);
@ -3101,7 +3123,7 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
/* FARP-RSP received from DID <did> */
lpfc_printf_log(phba,
KERN_INFO,
LOG_IP,
LOG_ELS,
"%d:0600 FARP-RSP received from DID x%x\n",
phba->brd_no, did);

View File

@ -525,7 +525,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
psli = &phba->sli;
mb = &pmb->mb;
/* Since we don't do discovery right now, turn these off here */
psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
@ -641,7 +641,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (rc == MBX_NOT_FINISHED) {
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
phba->hba_state = LPFC_HBA_READY;
@ -672,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
if (phba->cfg_soft_wwnn)
u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn)
u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
memcpy((uint8_t *) & phba->fc_nodename,
@ -696,7 +698,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
== MBX_NOT_FINISHED) {
mempool_free( pmb, phba->mbox_mem_pool);
lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &=
psli->ring[(psli->extra_ring)].flag &=
~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &=
~LPFC_STOP_IOCB_EVENT;
@ -715,6 +717,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
int i;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
struct lpfc_dmabuf *mp;
int rc;
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@ -793,16 +798,27 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
if (sparam_mbox) {
lpfc_read_sparam(phba, sparam_mbox);
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
lpfc_sli_issue_mbox(phba, sparam_mbox,
rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(sparam_mbox, phba->mbox_mem_pool);
if (cfglink_mbox)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
return;
}
}
if (cfglink_mbox) {
phba->hba_state = LPFC_LOCAL_CFG_LINK;
lpfc_config_link(phba, cfglink_mbox);
cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
lpfc_sli_issue_mbox(phba, cfglink_mbox,
rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
}
}
@ -1067,6 +1083,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
}
phba->fc_ns_retry = 0;
@ -1423,7 +1440,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
}
} else if (pring->ringno == psli->ip_ring) {
} else if (pring->ringno == psli->extra_ring) {
} else if (pring->ringno == psli->fcp_ring) {
/* Skip match check if waiting to relogin to FCP target */
@ -1680,21 +1697,38 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_nodelist *ndlp;
struct list_head *lists[]={&phba->fc_nlpunmap_list,
&phba->fc_nlpmap_list,
&phba->fc_plogi_list,
&phba->fc_adisc_list,
&phba->fc_reglogin_list,
&phba->fc_prli_list,
&phba->fc_npr_list,
&phba->fc_unused_list};
uint32_t search[]={NLP_SEARCH_UNMAPPED,
NLP_SEARCH_MAPPED,
NLP_SEARCH_PLOGI,
NLP_SEARCH_ADISC,
NLP_SEARCH_REGLOGIN,
NLP_SEARCH_PRLI,
NLP_SEARCH_NPR,
NLP_SEARCH_UNUSED};
int i;
uint32_t data1;
spin_lock_irq(phba->host->host_lock);
if (order & NLP_SEARCH_UNMAPPED) {
list_for_each_entry_safe(ndlp, next_ndlp,
&phba->fc_nlpunmap_list, nlp_listp) {
for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
if (!(order & search[i]))
continue;
list_for_each_entry(ndlp, lists[i], nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* FIND node DID unmapped */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0929 FIND node DID unmapped"
"%d:0929 FIND node DID "
" Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@ -1704,177 +1738,12 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
}
}
}
if (order & NLP_SEARCH_MAPPED) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* FIND node DID mapped */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0930 FIND node DID mapped "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_PLOGI) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to PLOGI */
/* FIND node DID plogi */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0908 FIND node DID plogi "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_ADISC) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to ADISC */
/* FIND node DID adisc */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0931 FIND node DID adisc "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_REGLOGIN) {
list_for_each_entry_safe(ndlp, next_ndlp,
&phba->fc_reglogin_list, nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to REGLOGIN */
/* FIND node DID reglogin */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0901 FIND node DID reglogin"
" Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_PRLI) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to PRLI */
/* FIND node DID prli */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0902 FIND node DID prli "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_NPR) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to NPR */
/* FIND node DID npr */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0903 FIND node DID npr "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_UNUSED) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to UNUSED */
/* FIND node DID unused */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0905 FIND node DID unused "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
spin_unlock_irq(phba->host->host_lock);
/* FIND node did <did> NOT FOUND */
lpfc_printf_log(phba,
KERN_INFO,
LOG_NODE,
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
phba->brd_no, did, order);
/* no match found */
return NULL;
}
@ -2036,7 +1905,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
if (rc == MBX_NOT_FINISHED) {
mempool_free( mbox, phba->mbox_mem_pool);
lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &=
psli->ring[(psli->extra_ring)].flag &=
~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &=
~LPFC_STOP_IOCB_EVENT;
@ -2415,7 +2284,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
if (clrlaerr) {
lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
phba->hba_state = LPFC_HBA_READY;

View File

@ -42,14 +42,14 @@
#define FCELSSIZE 1024 /* maximum ELS transfer size */
#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
#define LPFC_IP_RING 1 /* ring 1 for IP commands */
#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
#define LPFC_FCP_NEXT_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */
#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */
#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 extra command ring entries */
#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 extra response ring entries */
#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
@ -121,6 +121,20 @@ struct lpfc_sli_ct_request {
uint32_t rsvd[7];
} rft;
struct rff {
uint32_t PortId;
uint8_t reserved[2];
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t feature_res:6;
uint8_t feature_init:1;
uint8_t feature_tgt:1;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint8_t feature_tgt:1;
uint8_t feature_init:1;
uint8_t feature_res:6;
#endif
uint8_t type_code; /* type=8 for FCP */
} rff;
struct rnn {
uint32_t PortId; /* For RNN_ID requests */
uint8_t wwnn[8];
@ -136,6 +150,7 @@ struct lpfc_sli_ct_request {
#define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235)
#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
@ -225,6 +240,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RNN_ID 0x0213
#define SLI_CTNS_RCS_ID 0x0214
#define SLI_CTNS_RFT_ID 0x0217
#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RSPN_ID 0x0218
#define SLI_CTNS_RPT_ID 0x021A
#define SLI_CTNS_RIP_NN 0x0235
@ -1089,12 +1105,6 @@ typedef struct {
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_SUBSYSTEM_ID_LP11000S 0xfc11
#define PCI_SUBSYSTEM_ID_LP11002S 0xfc12
#define PCI_SUBSYSTEM_ID_LPE11000S 0xfc21
#define PCI_SUBSYSTEM_ID_LPE11002S 0xfc22
#define PCI_SUBSYSTEM_ID_LPE11010S 0xfc2A
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
#define SUPERFLY_JEDEC_ID 0x0020
@ -1284,6 +1294,10 @@ typedef struct { /* FireFly BIU registers */
#define CMD_FCP_IREAD_CX 0x1B
#define CMD_FCP_ICMND_CR 0x1C
#define CMD_FCP_ICMND_CX 0x1D
#define CMD_FCP_TSEND_CX 0x1F
#define CMD_FCP_TRECEIVE_CX 0x21
#define CMD_FCP_TRSP_CX 0x23
#define CMD_FCP_AUTO_TRSP_CX 0x29
#define CMD_ADAPTER_MSG 0x20
#define CMD_ADAPTER_DUMP 0x22
@ -1310,6 +1324,9 @@ typedef struct { /* FireFly BIU registers */
#define CMD_FCP_IREAD64_CX 0x9B
#define CMD_FCP_ICMND64_CR 0x9C
#define CMD_FCP_ICMND64_CX 0x9D
#define CMD_FCP_TSEND64_CX 0x9F
#define CMD_FCP_TRECEIVE64_CX 0xA1
#define CMD_FCP_TRSP64_CX 0xA3
#define CMD_GEN_REQUEST64_CR 0xC2
#define CMD_GEN_REQUEST64_CX 0xC3

View File

@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
kfree(mp);
pmb->context1 = NULL;
if (phba->cfg_soft_wwnn)
u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn)
u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
@ -349,8 +351,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
phba->hba_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ring 0 till hba_state is READY */
if (psli->ring[psli->ip_ring].cmdringaddr)
psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
if (psli->ring[psli->extra_ring].cmdringaddr)
psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
if (psli->ring[psli->fcp_ring].cmdringaddr)
psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
if (psli->ring[psli->next_ring].cmdringaddr)
@ -517,7 +519,8 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
struct lpfc_sli_ring *pring;
uint32_t event_data;
if (phba->work_hs & HS_FFER6) {
if (phba->work_hs & HS_FFER6 ||
phba->work_hs & HS_FFER5) {
/* Re-establishing Link */
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1301 Re-establishing Link "
@ -611,7 +614,7 @@ lpfc_handle_latt(struct lpfc_hba * phba)
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
goto lpfc_handle_latt_free_mp;
goto lpfc_handle_latt_free_mbuf;
/* Clear Link Attention in HA REG */
spin_lock_irq(phba->host->host_lock);
@ -621,6 +624,8 @@ lpfc_handle_latt(struct lpfc_hba * phba)
return;
lpfc_handle_latt_free_mbuf:
lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
kfree(mp);
lpfc_handle_latt_free_pmb:
@ -802,19 +807,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
{
lpfc_vpd_t *vp;
uint16_t dev_id = phba->pcidev->device;
uint16_t dev_subid = phba->pcidev->subsystem_device;
uint8_t hdrtype;
int max_speed;
char * ports;
struct {
char * name;
int max_speed;
char * ports;
char * bus;
} m = {"<Unknown>", 0, "", ""};
} m = {"<Unknown>", 0, ""};
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
ports = (hdrtype == 0x80) ? "2-port " : "";
if (mdp && mdp[0] != '\0'
&& descp && descp[0] != '\0')
return;
@ -834,130 +833,93 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
switch (dev_id) {
case PCI_DEVICE_ID_FIREFLY:
m = (typeof(m)){"LP6000", max_speed, "", "PCI"};
m = (typeof(m)){"LP6000", max_speed, "PCI"};
break;
case PCI_DEVICE_ID_SUPERFLY:
if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
m = (typeof(m)){"LP7000", max_speed, "", "PCI"};
m = (typeof(m)){"LP7000", max_speed, "PCI"};
else
m = (typeof(m)){"LP7000E", max_speed, "", "PCI"};
m = (typeof(m)){"LP7000E", max_speed, "PCI"};
break;
case PCI_DEVICE_ID_DRAGONFLY:
m = (typeof(m)){"LP8000", max_speed, "", "PCI"};
m = (typeof(m)){"LP8000", max_speed, "PCI"};
break;
case PCI_DEVICE_ID_CENTAUR:
if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
m = (typeof(m)){"LP9002", max_speed, "", "PCI"};
m = (typeof(m)){"LP9002", max_speed, "PCI"};
else
m = (typeof(m)){"LP9000", max_speed, "", "PCI"};
m = (typeof(m)){"LP9000", max_speed, "PCI"};
break;
case PCI_DEVICE_ID_RFLY:
m = (typeof(m)){"LP952", max_speed, "", "PCI"};
m = (typeof(m)){"LP952", max_speed, "PCI"};
break;
case PCI_DEVICE_ID_PEGASUS:
m = (typeof(m)){"LP9802", max_speed, "", "PCI-X"};
m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
break;
case PCI_DEVICE_ID_THOR:
if (hdrtype == 0x80)
m = (typeof(m)){"LP10000DC",
max_speed, ports, "PCI-X"};
else
m = (typeof(m)){"LP10000",
max_speed, ports, "PCI-X"};
m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
break;
case PCI_DEVICE_ID_VIPER:
m = (typeof(m)){"LPX1000", max_speed, "", "PCI-X"};
m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
break;
case PCI_DEVICE_ID_PFLY:
m = (typeof(m)){"LP982", max_speed, "", "PCI-X"};
m = (typeof(m)){"LP982", max_speed, "PCI-X"};
break;
case PCI_DEVICE_ID_TFLY:
if (hdrtype == 0x80)
m = (typeof(m)){"LP1050DC", max_speed, ports, "PCI-X"};
else
m = (typeof(m)){"LP1050", max_speed, ports, "PCI-X"};
m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
break;
case PCI_DEVICE_ID_HELIOS:
if (hdrtype == 0x80)
m = (typeof(m)){"LP11002", max_speed, ports, "PCI-X2"};
else
m = (typeof(m)){"LP11000", max_speed, ports, "PCI-X2"};
m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
break;
case PCI_DEVICE_ID_HELIOS_SCSP:
m = (typeof(m)){"LP11000-SP", max_speed, ports, "PCI-X2"};
m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
break;
case PCI_DEVICE_ID_HELIOS_DCSP:
m = (typeof(m)){"LP11002-SP", max_speed, ports, "PCI-X2"};
m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
break;
case PCI_DEVICE_ID_NEPTUNE:
if (hdrtype == 0x80)
m = (typeof(m)){"LPe1002", max_speed, ports, "PCIe"};
else
m = (typeof(m)){"LPe1000", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_NEPTUNE_SCSP:
m = (typeof(m)){"LPe1000-SP", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_NEPTUNE_DCSP:
m = (typeof(m)){"LPe1002-SP", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_BMID:
m = (typeof(m)){"LP1150", max_speed, ports, "PCI-X2"};
m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
break;
case PCI_DEVICE_ID_BSMB:
m = (typeof(m)){"LP111", max_speed, ports, "PCI-X2"};
m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
break;
case PCI_DEVICE_ID_ZEPHYR:
if (hdrtype == 0x80)
m = (typeof(m)){"LPe11002", max_speed, ports, "PCIe"};
else
m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_ZEPHYR_SCSP:
m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_ZEPHYR_DCSP:
m = (typeof(m)){"LPe11002-SP", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_ZMID:
m = (typeof(m)){"LPe1150", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_ZSMB:
m = (typeof(m)){"LPe111", max_speed, ports, "PCIe"};
m = (typeof(m)){"LPe111", max_speed, "PCIe"};
break;
case PCI_DEVICE_ID_LP101:
m = (typeof(m)){"LP101", max_speed, ports, "PCI-X"};
m = (typeof(m)){"LP101", max_speed, "PCI-X"};
break;
case PCI_DEVICE_ID_LP10000S:
m = (typeof(m)){"LP10000-S", max_speed, ports, "PCI"};
m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
break;
case PCI_DEVICE_ID_LP11000S:
case PCI_DEVICE_ID_LPE11000S:
switch (dev_subid) {
case PCI_SUBSYSTEM_ID_LP11000S:
m = (typeof(m)){"LP11000-S", max_speed,
ports, "PCI-X2"};
"PCI-X2"};
break;
case PCI_SUBSYSTEM_ID_LP11002S:
m = (typeof(m)){"LP11002-S", max_speed,
ports, "PCI-X2"};
break;
case PCI_SUBSYSTEM_ID_LPE11000S:
case PCI_DEVICE_ID_LPE11000S:
m = (typeof(m)){"LPe11000-S", max_speed,
ports, "PCIe"};
break;
case PCI_SUBSYSTEM_ID_LPE11002S:
m = (typeof(m)){"LPe11002-S", max_speed,
ports, "PCIe"};
break;
case PCI_SUBSYSTEM_ID_LPE11010S:
m = (typeof(m)){"LPe11010-S", max_speed,
"10-port ", "PCIe"};
break;
default:
m = (typeof(m)){ NULL };
break;
}
"PCIe"};
break;
default:
m = (typeof(m)){ NULL };
@ -968,8 +930,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
snprintf(mdp, 79,"%s", m.name);
if (descp && descp[0] == '\0')
snprintf(descp, 255,
"Emulex %s %dGb %s%s Fibre Channel Adapter",
m.name, m.max_speed, m.ports, m.bus);
"Emulex %s %dGb %s Fibre Channel Adapter",
m.name, m.max_speed, m.bus);
}
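
A note on the model-table hunks above: every initializer loses its third argument because the separate ports string was dropped from both the local descriptor and the format string. A minimal userspace sketch of the resulting shape, with illustrative type and field names (the driver's actual local type is anonymous):

#include <stdio.h>

struct hba_model {
	const char *name;	/* board model, e.g. "LP11000" */
	int max_speed;		/* link speed in Gb/s */
	const char *bus;	/* bus type, e.g. "PCIe"; no ports field anymore */
};

static void describe(char *desc, size_t len, const struct hba_model *m)
{
	/* single format string now that the per-port suffix is gone */
	snprintf(desc, len, "Emulex %s %dGb %s Fibre Channel Adapter",
		 m->name, m->max_speed, m->bus);
}

int main(void)
{
	char buf[80];

	describe(buf, sizeof(buf), &(struct hba_model){ "LP11000", 4, "PCI-X2" });
	puts(buf);	/* Emulex LP11000 4Gb PCI-X2 Fibre Channel Adapter */
	return 0;
}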
/**************************************************/
@ -1651,6 +1613,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (error)
goto out_remove_host;
if (phba->cfg_use_msi) {
error = pci_enable_msi(phba->pcidev);
if (error)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
"Enable MSI failed, continuing with "
"IRQ\n", phba->brd_no);
}
error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
LPFC_DRIVER_NAME, phba);
if (error) {
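
The MSI hunk follows the usual opportunistic pattern: try pci_enable_msi(), log and fall back to the legacy line interrupt on failure, and pair every teardown path with pci_disable_msi() (as the two later hunks do). A hedged sketch of the pattern, with illustrative names:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int setup_irq_with_msi(struct pci_dev *pdev, irq_handler_t handler,
			      void *data, bool want_msi)
{
	if (want_msi && pci_enable_msi(pdev))
		dev_info(&pdev->dev, "MSI unavailable, using legacy IRQ\n");

	/* pdev->irq now names the MSI vector if enabling succeeded */
	return request_irq(pdev->irq, handler, IRQF_SHARED, "mydrv", data);
}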
@ -1730,6 +1700,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_stop_timer(phba);
phba->work_hba_events = 0;
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(phba);
out_remove_host:
@ -1796,6 +1767,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/* Release the irq reservation */
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
lpfc_cleanup(phba, 0);
lpfc_stop_timer(phba);

View File

@ -28,7 +28,7 @@
#define LOG_NODE 0x80 /* Node table events */
#define LOG_MISC 0x400 /* Miscellaneous events */
#define LOG_SLI 0x800 /* SLI events */
#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
#define LOG_LIBDFC 0x2000 /* Libdfc events */
#define LOG_ALL_MSG 0xffff /* LOG all messages */

View File

@ -739,7 +739,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
IOCB_t *irsp;
struct serv_parm *sp;
@ -829,6 +829,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
NLP_REGLOGIN_LIST);
return ndlp->nlp_state;
}
mp = (struct lpfc_dmabuf *)mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool);
} else {
mempool_free(mbox, phba->mbox_mem_pool);
@ -1620,8 +1623,8 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
* or discovery in progress for this node. Starting discovery
* here will affect the counting of discovery threads.
*/
if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_ADISC_ISSUE;

View File

@ -297,8 +297,10 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
uint32_t host_status = DID_OK;
uint32_t rsplen = 0;
uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
/*
* If this is a task management command, there is no
@ -310,10 +312,25 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
goto out;
}
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d:0730 FCP command failed: RSP "
"Data: x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, resp_info, scsi_status,
if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
if (snslen > SCSI_SENSE_BUFFERSIZE)
snslen = SCSI_SENSE_BUFFERSIZE;
if (resp_info & RSP_LEN_VALID)
rsplen = be32_to_cpu(fcprsp->rspRspLen);
memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
}
lp = (uint32_t *)cmnd->sense_buffer;
if (!scsi_status && (resp_info & RESID_UNDER))
logit = LOG_FCP;
lpfc_printf_log(phba, KERN_WARNING, logit,
"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no, cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
be32_to_cpu(fcprsp->rspResId),
be32_to_cpu(fcprsp->rspSnsLen),
be32_to_cpu(fcprsp->rspRspLen),
@ -328,14 +345,6 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
}
}
if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
if (snslen > SCSI_SENSE_BUFFERSIZE)
snslen = SCSI_SENSE_BUFFERSIZE;
memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
}
cmnd->resid = 0;
if (resp_info & RESID_UNDER) {
cmnd->resid = be32_to_cpu(fcprsp->rspResId);
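
The reordering above exists so the sense bytes are captured before the new log message dereferences them. A sketch of the capture step, assuming the lpfc response layout used in this hunk (big-endian rspSnsLen/rspRspLen, sense data following rspInfo0); the helper itself is illustrative:

/* assumes lpfc driver headers for struct fcp_rsp and friends */
static void copy_fcp_sense(struct scsi_cmnd *cmnd, struct fcp_rsp *fcprsp,
			   uint32_t resp_info)
{
	uint32_t snslen, rsplen = 0;

	if (!(resp_info & SNS_LEN_VALID) || !fcprsp->rspSnsLen)
		return;

	snslen = be32_to_cpu(fcprsp->rspSnsLen);
	if (snslen > SCSI_SENSE_BUFFERSIZE)	/* clamp: never overrun */
		snslen = SCSI_SENSE_BUFFERSIZE;
	if (resp_info & RSP_LEN_VALID)		/* sense follows the rsp info */
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
	memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
}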
@ -378,7 +387,7 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
*/
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
"%d:0734 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n", phba->brd_no,
be32_to_cpu(fcpcmd->fcpDl),
@ -670,6 +679,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
struct lpfc_iocbq *iocbqrsp;
int ret;
if (!rdata->pnode)
return FAILED;
lpfc_cmd->rdata = rdata;
ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
FCP_TARGET_RESET);
@ -976,20 +988,34 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock);
loopcnt = 0;
/*
* If target is not in a MAPPED state, delay the reset until
* target is rediscovered or devloss timeout expires.
*/
while ( 1 ) {
if (!pnode)
break;
return FAILED;
if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
spin_lock_irq(phba->host->host_lock);
loopcnt++;
rdata = cmnd->device->hostdata;
if (!rdata ||
(loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0721 LUN Reset rport failure:"
" cnt x%x rdata x%p\n",
phba->brd_no, loopcnt, rdata);
goto out;
}
if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
pnode = rdata->pnode;
if (!pnode)
return FAILED;
}
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
break;
}
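
Untangling the interleaved old and new lines above: the reset handler now fails fast when the node is gone and otherwise polls every 500 ms until the target is MAPPED or the devloss timeout has clearly expired. A compact sketch under those assumptions (the host_lock handling and rdata refresh are elided):

static int wait_for_mapped_node(struct lpfc_hba *phba,
				struct lpfc_rport_data *rdata)
{
	struct lpfc_nodelist *pnode = rdata->pnode;
	int loopcnt = 0;

	while (pnode && pnode->nlp_state != NLP_STE_MAPPED_NODE) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		if (++loopcnt > (phba->cfg_devloss_tmo * 2) + 1)
			return -ETIMEDOUT;	/* rport never came back */
		pnode = rdata->pnode;
	}
	return pnode ? 0 : -ENODEV;
}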

View File

@ -117,6 +117,10 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_FCP_IREAD_CX:
case CMD_FCP_ICMND_CR:
case CMD_FCP_ICMND_CX:
case CMD_FCP_TSEND_CX:
case CMD_FCP_TRSP_CX:
case CMD_FCP_TRECEIVE_CX:
case CMD_FCP_AUTO_TRSP_CX:
case CMD_ADAPTER_MSG:
case CMD_ADAPTER_DUMP:
case CMD_XMIT_SEQUENCE64_CR:
@ -131,6 +135,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_FCP_IREAD64_CX:
case CMD_FCP_ICMND64_CR:
case CMD_FCP_ICMND64_CX:
case CMD_FCP_TSEND64_CX:
case CMD_FCP_TRSP64_CX:
case CMD_FCP_TRECEIVE64_CX:
case CMD_GEN_REQUEST64_CR:
case CMD_GEN_REQUEST64_CX:
case CMD_XMIT_ELS_RSP64_CX:
@ -1098,6 +1105,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
sizeof (IOCB_t));
INIT_LIST_HEAD(&(rspiocbq.list));
irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
@ -1149,6 +1157,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
}
}
break;
case LPFC_UNSOL_IOCB:
spin_unlock_irqrestore(phba->host->host_lock, iflag);
lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
spin_lock_irqsave(phba->host->host_lock, iflag);
break;
default:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
char adaptermsg[LPFC_MAX_ADPTMSG];
@ -2472,13 +2485,17 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
psli = &phba->sli;
/* Adjust cmd/rsp ring iocb entries more evenly */
/* Take some away from the FCP ring */
pring = &psli->ring[psli->fcp_ring];
pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
pring = &psli->ring[1];
/* and give them to the extra ring */
pring = &psli->ring[psli->extra_ring];
pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
@ -2488,8 +2505,8 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
pring->iotag_max = 4096;
pring->num_mask = 1;
pring->prt[0].profile = 0; /* Mask 0 */
pring->prt[0].rctl = FC_UNSOL_DATA;
pring->prt[0].type = 5;
pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
pring->prt[0].type = phba->cfg_multi_ring_type;
pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
return 0;
}
@ -2505,7 +2522,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
psli->sli_flag = 0;
psli->fcp_ring = LPFC_FCP_RING;
psli->next_ring = LPFC_FCP_NEXT_RING;
psli->ip_ring = LPFC_IP_RING;
psli->extra_ring = LPFC_EXTRA_RING;
psli->iocbq_lookup = NULL;
psli->iocbq_lookup_len = 0;
@ -2528,7 +2545,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->fast_iotag = pring->iotag_max;
pring->num_mask = 0;
break;
case LPFC_IP_RING: /* ring 1 - IP */
case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
@ -3238,6 +3255,21 @@ lpfc_intr_handler(int irq, void *dev_id)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING],
status);
if (phba->cfg_multi_ring_support == 2) {
/*
* Process all events on extra ring. Take the optimized path
* for extra ring IO. Any other IO is slow path and is handled
* by the worker thread.
*/
status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status >>= (4*LPFC_EXTRA_RING);
if (status & HA_RXATT) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_EXTRA_RING],
status);
}
}
return IRQ_HANDLED;
} /* lpfc_intr_handler */
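
The host-attention word gives each ring a 4-bit field at bit position 4*ring, which is why the new extra-ring code masks with HA_RXMASK and shifts by 4*LPFC_EXTRA_RING. A small userspace illustration (constant values mirror the usage above but should be treated as illustrative):

#include <stdio.h>

#define HA_RXMASK 0xfU	/* 4 attention bits per ring */
#define HA_RXATT  0x8U	/* response-queue attention bit (illustrative) */

int main(void)
{
	unsigned int ha_copy = 0x00000080;	/* pretend HA register snapshot */
	int ring = 1;				/* LPFC_EXTRA_RING */
	unsigned int status =
		(ha_copy & (HA_RXMASK << (4 * ring))) >> (4 * ring);

	printf("ring %d status 0x%x, rx attention: %s\n", ring, status,
	       (status & HA_RXATT) ? "yes" : "no");
	return 0;
}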

View File

@ -198,7 +198,7 @@ struct lpfc_sli {
int fcp_ring; /* ring used for FCP initiator commands */
int next_ring;
int ip_ring; /* ring used for IP network drv cmds */
int extra_ring; /* extra ring used for other protocols */
struct lpfc_sli_stat slistat; /* SLI statistical info */
struct list_head mboxq;

View File

@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.1.10"
#define LPFC_DRIVER_VERSION "8.1.11"
#define LPFC_DRIVER_NAME "lpfc"

View File

@ -73,10 +73,10 @@ static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param(max_mbox_busy_wait, ushort, 0);
MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
#define RDINDOOR(adapter) readl((adapter)->base + 0x20)
#define RDOUTDOOR(adapter) readl((adapter)->base + 0x2C)
#define WRINDOOR(adapter,value) writel(value, (adapter)->base + 0x20)
#define WROUTDOOR(adapter,value) writel(value, (adapter)->base + 0x2C)
#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
/*
* Global variables
@ -1386,7 +1386,8 @@ megaraid_isr_memmapped(int irq, void *devp)
handled = 1;
while( RDINDOOR(adapter) & 0x02 ) cpu_relax();
while( RDINDOOR(adapter) & 0x02 )
cpu_relax();
mega_cmd_done(adapter, completed, nstatus, status);
@ -4668,6 +4669,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->host_no, mega_baseport, irq);
adapter->base = mega_baseport;
if (flag & BOARD_MEMMAP)
adapter->mmio_base = (void __iomem *) mega_baseport;
INIT_LIST_HEAD(&adapter->free_list);
INIT_LIST_HEAD(&adapter->pending_list);
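
The megaraid change is a sparse/portability fix: readl()/writel() take a void __iomem *, so memory-mapped boards get their own typed field instead of casting the untyped base at every accessor. The shape of the fix, with illustrative names:

#include <linux/io.h>

struct my_adapter {
	unsigned long base;		/* I/O-port base, BOARD_IOMAP parts */
	void __iomem *mmio_base;	/* ioremap()ed BAR, BOARD_MEMMAP parts */
};

static u32 read_outdoor(struct my_adapter *a)
{
	return readl(a->mmio_base + 0x2C);	/* typed access, no cast */
}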

View File

@ -802,6 +802,7 @@ typedef struct {
u32 flag;
unsigned long base;
void __iomem *mmio_base;
/* mbox64 with mbox not aligned on 16-byte boundary */
mbox64_t *una_mbox64;

View File

@ -517,7 +517,7 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Returns the number of frames required for the given number of sge's (sge_count)
*/
u32 megasas_get_frame_count(u8 sge_count)
static u32 megasas_get_frame_count(u8 sge_count)
{
int num_cnt;
int sge_bytes;
@ -1733,7 +1733,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
*
* Tasklet to complete cmds
*/
void megasas_complete_cmd_dpc(unsigned long instance_addr)
static void megasas_complete_cmd_dpc(unsigned long instance_addr)
{
u32 producer;
u32 consumer;

View File

@ -589,10 +589,12 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
static struct ncr_driver_setup
driver_setup = SCSI_NCR_DRIVER_SETUP;
#ifndef MODULE
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
static struct ncr_driver_setup
driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
#endif
#endif /* !MODULE */
#define initverbose (driver_setup.verbose)
#define bootverbose (np->verbose)
@ -641,6 +643,13 @@ static struct ncr_driver_setup
#define OPT_IARB 26
#endif
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
#ifndef MODULE
static char setup_token[] __initdata =
"tags:" "mpar:"
"spar:" "disc:"
@ -660,12 +669,6 @@ static char setup_token[] __initdata =
#endif
; /* DO NOT REMOVE THIS ';' */
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
static int __init get_setup_token(char *p)
{
char *cur = setup_token;
@ -682,7 +685,6 @@ static int __init get_setup_token(char *p)
return 0;
}
static int __init sym53c8xx__setup(char *str)
{
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
@ -804,6 +806,7 @@ static int __init sym53c8xx__setup(char *str)
#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
return 1;
}
#endif /* !MODULE */
/*===================================================================
**
@ -8321,12 +8324,12 @@ char *ncr53c8xx; /* command line passed by insmod */
module_param(ncr53c8xx, charp, 0);
#endif
#ifndef MODULE
static int __init ncr53c8xx_setup(char *str)
{
return sym53c8xx__setup(str);
}
#ifndef MODULE
__setup("ncr53c8xx=", ncr53c8xx_setup);
#endif
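
The ifdef reshuffle in this driver amounts to a build-time split: boot command-line parsing (__setup, comma-separated options) only exists when the driver is built in, while a modular build takes a space-separated option string through module_param. Reduced to its essentials (illustrative):

#ifdef MODULE
#define ARG_SEP ' '	/* insmod ncr53c8xx='tags:4 sync:1' */
#else
#define ARG_SEP ','	/* boot line: ncr53c8xx=tags:4,sync:1 */

static int __init ncr53c8xx_setup(char *str)
{
	return sym53c8xx__setup(str);
}
__setup("ncr53c8xx=", ncr53c8xx_setup);
#endif	/* !MODULE */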

View File

@ -390,7 +390,7 @@ static struct sysfs_entry {
{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
{ "vpd", &sysfs_vpd_attr, 1 },
{ "sfp", &sysfs_sfp_attr, 1 },
{ 0 },
{ NULL },
};
void

View File

@ -59,9 +59,6 @@ int
qla2x00_initialize_adapter(scsi_qla_host_t *ha)
{
int rval;
uint8_t restart_risc = 0;
uint8_t retry;
uint32_t wait_time;
/* Clear adapter flags. */
ha->flags.online = 0;
@ -104,87 +101,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
retry = 10;
/*
* Try to configure the loop.
*/
do {
restart_risc = 0;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
if ((rval = ha->isp_ops.chip_diag(ha)) == QLA_SUCCESS) {
rval = ha->isp_ops.chip_diag(ha);
if (rval)
return (rval);
rval = qla2x00_setup_chip(ha);
if (rval)
return (rval);
}
}
if (rval == QLA_SUCCESS &&
(rval = qla2x00_init_rings(ha)) == QLA_SUCCESS) {
check_fw_ready_again:
/*
* Wait for a successful LIP up to a maximum
* of (in seconds): RISC login timeout value,
* RISC retry count value, and port down retry
* value OR a minimum of 4 seconds OR, if no
* cable, only 5 seconds.
*/
rval = qla2x00_fw_ready(ha);
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
/* Issue a marker after FW becomes ready. */
qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
/*
* Wait at most MAX_TARGET RSCNs for a stable
* link.
*/
wait_time = 256;
do {
clear_bit(LOOP_RESYNC_NEEDED,
&ha->dpc_flags);
rval = qla2x00_configure_loop(ha);
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&ha->dpc_flags)) {
restart_risc = 1;
break;
}
/*
* If loop state change while we were
* discoverying devices then wait for
* LIP to complete
*/
if (atomic_read(&ha->loop_state) !=
LOOP_READY && retry--) {
goto check_fw_ready_again;
}
wait_time--;
} while (!atomic_read(&ha->loop_down_timer) &&
retry &&
wait_time &&
(test_bit(LOOP_RESYNC_NEEDED,
&ha->dpc_flags)));
if (wait_time == 0)
rval = QLA_FUNCTION_FAILED;
} else if (ha->device_flags & DFLG_NO_CABLE)
/* If no cable, then all is good. */
rval = QLA_SUCCESS;
}
} while (restart_risc && retry--);
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
ha->marker_needed = 0;
ha->flags.online = 1;
} else {
DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
}
rval = qla2x00_init_rings(ha);
return (rval);
}
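
After the deletion above, adapter bring-up no longer spins waiting for a stable loop; that waiting moved behind the new scan_start/scan_finished hooks (later in this merge). A sketch of the remaining flow, with error handling condensed:

static int initialize_adapter_sketch(scsi_qla_host_t *ha)
{
	int rval;

	if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
		rval = ha->isp_ops.chip_diag(ha);	/* verify the RISC */
		if (rval)
			return rval;
		rval = qla2x00_setup_chip(ha);		/* load firmware */
		if (rval)
			return rval;
	}
	return qla2x00_init_rings(ha);
}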
@ -2208,7 +2133,6 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
atomic_set(&fcport->state, FCS_ONLINE);
if (ha->flags.init_done)
qla2x00_reg_remote_port(ha, fcport);
}

View File

@ -95,6 +95,8 @@ MODULE_PARM_DESC(ql2xqfullrampup,
*/
static int qla2xxx_slave_configure(struct scsi_device * device);
static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
void (*fn)(struct scsi_cmnd *));
@ -124,6 +126,8 @@ static struct scsi_host_template qla2x00_driver_template = {
.slave_alloc = qla2xxx_slave_alloc,
.slave_destroy = qla2xxx_slave_destroy,
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = qla2x00_change_queue_depth,
.change_queue_type = qla2x00_change_queue_type,
.this_id = -1,
@ -287,7 +291,7 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
return str;
}
char *
static char *
qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
{
char un_str[10];
@ -325,7 +329,7 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
return (str);
}
char *
static char *
qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
{
sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
@ -634,7 +638,7 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
* Note:
* Only return FAILED if command not returned by firmware.
**************************************************************************/
int
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@ -771,7 +775,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
* SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
int
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@ -902,7 +906,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
* SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
int
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@ -963,7 +967,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
*
* Note:
**************************************************************************/
int
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@ -1366,6 +1370,29 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
set_bit(RSCN_UPDATE, &ha->dpc_flags);
}
static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
if (!ha->host)
return 1;
if (time > ha->loop_reset_delay * HZ)
return 1;
return atomic_read(&ha->loop_state) == LOOP_READY;
}
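
These two host-template hooks are the driver's side of the midlayer's asynchronous scanning: scan_start() kicks off loop discovery, and scan_finished() reports readiness (or gives up after loop_reset_delay). A simplified illustration of the contract, not the midlayer's actual implementation:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <scsi/scsi_host.h>

static void scan_until_ready(struct Scsi_Host *shost)
{
	unsigned long start = jiffies;

	if (shost->hostt->scan_start)
		shost->hostt->scan_start(shost);

	while (shost->hostt->scan_finished &&
	       !shost->hostt->scan_finished(shost, jiffies - start))
		msleep(100);	/* the real code waits in a kernel thread */
}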
/*
* PCI driver interface
*/
@ -1377,10 +1404,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *host;
scsi_qla_host_t *ha;
unsigned long flags = 0;
unsigned long wait_switch = 0;
char pci_info[20];
char fw_str[30];
fc_port_t *fcport;
struct scsi_host_template *sht;
if (pci_enable_device(pdev))
@ -1631,30 +1656,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->isp_ops.enable_intrs(ha);
/* v2.19.5b6 */
/*
* Wait around max loop_reset_delay secs for the devices to come
* on-line. We don't want Linux scanning before we are ready.
*
*/
for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
time_before(jiffies,wait_switch) &&
!(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
&& (ha->device_flags & SWITCH_FOUND) ;) {
qla2x00_check_fabric_devices(ha);
msleep(10);
}
pci_set_drvdata(pdev, ha);
ha->flags.init_done = 1;
ha->flags.online = 1;
num_hosts++;
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(ha);
qla2x00_init_host_attr(ha);
@ -1669,10 +1683,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
ha->isp_ops.fw_version_str(ha, fw_str));
/* Go with fc_rport registration. */
list_for_each_entry(fcport, &ha->fcports, list)
qla2x00_reg_remote_port(ha, fcport);
return 0;
probe_failed:

View File

@ -449,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
return FARX_ACCESS_NVRAM_DATA | naddr;
}
uint32_t
static uint32_t
qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
{
int rval;
@ -490,7 +490,7 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
return dwptr;
}
int
static int
qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
{
int rval;
@ -512,7 +512,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
return rval;
}
void
static void
qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
uint8_t *flash_id)
{
@ -537,7 +537,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
}
}
int
static int
qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{

View File

@ -71,7 +71,7 @@ void __dump_registers(struct scsi_qla_host *ha)
readw(&ha->reg->u1.isp4010.nvram));
}
else if (is_qla4022(ha)) {
else if (is_qla4022(ha) | is_qla4032(ha)) {
printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u1.isp4022.intr_mask),
@ -119,7 +119,7 @@ void __dump_registers(struct scsi_qla_host *ha)
readw(&ha->reg->u2.isp4010.port_err_status));
}
else if (is_qla4022(ha)) {
else if (is_qla4022(ha) | is_qla4032(ha)) {
printk(KERN_INFO "Page 0 Registers:\n");
printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,

View File

@ -40,7 +40,11 @@
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
#endif /* */
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032
#endif
#define QLA_SUCCESS 0
#define QLA_ERROR 1
@ -277,7 +281,6 @@ struct scsi_qla_host {
#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
@ -317,16 +320,17 @@ struct scsi_qla_host {
/* NVRAM registers */
struct eeprom_data *nvram;
spinlock_t hardware_lock ____cacheline_aligned;
spinlock_t list_lock;
uint32_t eeprom_cmd_data;
/* Counters for general statistics */
uint64_t isr_count;
uint64_t adapter_error_count;
uint64_t device_error_count;
uint64_t total_io_count;
uint64_t total_mbytes_xferred;
uint64_t link_failure_count;
uint64_t invalid_crc_count;
uint32_t bytes_xfered;
uint32_t spurious_int_count;
uint32_t aborted_io_count;
uint32_t io_timeout_count;
@ -438,6 +442,11 @@ static inline int is_qla4022(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
}
static inline int is_qla4032(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
}
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
@ -451,58 +460,58 @@ static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u1.isp4022.semaphore :
&ha->reg->u1.isp4010.nvram);
return (is_qla4010(ha) ?
&ha->reg->u1.isp4010.nvram :
&ha->reg->u1.isp4022.semaphore);
}
static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u1.isp4022.nvram :
&ha->reg->u1.isp4010.nvram);
return (is_qla4010(ha) ?
&ha->reg->u1.isp4010.nvram :
&ha->reg->u1.isp4022.nvram);
}
static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.ext_hw_conf :
&ha->reg->u2.isp4010.ext_hw_conf);
return (is_qla4010(ha) ?
&ha->reg->u2.isp4010.ext_hw_conf :
&ha->reg->u2.isp4022.p0.ext_hw_conf);
}
static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.port_status :
&ha->reg->u2.isp4010.port_status);
return (is_qla4010(ha) ?
&ha->reg->u2.isp4010.port_status :
&ha->reg->u2.isp4022.p0.port_status);
}
static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.port_ctrl :
&ha->reg->u2.isp4010.port_ctrl);
return (is_qla4010(ha) ?
&ha->reg->u2.isp4010.port_ctrl :
&ha->reg->u2.isp4022.p0.port_ctrl);
}
static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.port_err_status :
&ha->reg->u2.isp4010.port_err_status);
return (is_qla4010(ha) ?
&ha->reg->u2.isp4010.port_err_status :
&ha->reg->u2.isp4022.p0.port_err_status);
}
static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.gp_out :
&ha->reg->u2.isp4010.gp_out);
return (is_qla4010(ha) ?
&ha->reg->u2.isp4010.gp_out :
&ha->reg->u2.isp4022.p0.gp_out);
}
static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
return (is_qla4010(ha) ?
offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
}
int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
@ -511,59 +520,59 @@ int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
{
if (is_qla4022(a))
if (is_qla4010(a))
return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
QL4010_FLASH_SEM_BITS);
else
return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 13);
else
return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
QL4010_FLASH_SEM_BITS);
}
static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
{
if (is_qla4022(a))
ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
else
if (is_qla4010(a))
ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
else
ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
}
static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
{
if (is_qla4022(a))
if (is_qla4010(a))
return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
QL4010_NVRAM_SEM_BITS);
else
return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 10);
else
return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
QL4010_NVRAM_SEM_BITS);
}
static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
{
if (is_qla4022(a))
ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
else
if (is_qla4010(a))
ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
else
ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
}
static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
{
if (is_qla4022(a))
if (is_qla4010(a))
return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
QL4010_DRVR_SEM_BITS);
else
return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 1);
else
return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
QL4010_DRVR_SEM_BITS);
}
static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
{
if (is_qla4022(a))
ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
else
if (is_qla4010(a))
ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
else
ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
}
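
All of these inversions share one rationale: the newly supported ISP4032 uses the ISP4022 register layout, so testing for the odd one out (4010) keeps every helper a two-way branch instead of growing a third arm. Mirroring the semaphore helper above, illustratively:

static inline void __iomem *isp_semaphore_sketch(struct scsi_qla_host *ha)
{
	return is_qla4010(ha) ?
		&ha->reg->u1.isp4010.nvram :
		&ha->reg->u1.isp4022.semaphore;	/* 4022 and 4032 alike */
}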
/*---------------------------------------------------------------------------*/

View File

@ -296,7 +296,6 @@ static inline uint32_t clr_rmask(uint32_t val)
/* ISP Semaphore definitions */
/* ISP General Purpose Output definitions */
#define GPOR_TOPCAT_RESET 0x00000004
/* shadow registers (DMA'd from HA to system memory. read only) */
struct shadow_regs {
@ -339,10 +338,13 @@ union external_hw_config_reg {
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
#define MBOX_CMD_LUN_RESET 0x0016
#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
#define MBOX_CMD_GET_FW_STATUS 0x001F
#define MBOX_CMD_SET_ISNS_SERVICE 0x0021
#define ISNS_DISABLE 0
#define ISNS_ENABLE 1
#define MBOX_CMD_COPY_FLASH 0x0024
#define MBOX_CMD_WRITE_FLASH 0x0025
#define MBOX_CMD_READ_FLASH 0x0026
#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
@ -360,10 +362,13 @@ union external_hw_config_reg {
#define DDB_DS_SESSION_FAILED 0x06
#define DDB_DS_LOGIN_IN_PROCESS 0x07
#define MBOX_CMD_GET_FW_STATE 0x0069
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
#define FW_STATE_CONFIG_WAIT 0x0001
#define FW_STATE_WAIT_LOGIN 0x0002
#define FW_STATE_ERROR 0x0004
#define FW_STATE_DHCP_IN_PROGRESS 0x0008

View File

@ -8,6 +8,7 @@
#ifndef __QLA4x_GBL_H
#define __QLA4x_GBL_H
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,

View File

@ -259,10 +259,16 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
"seconds expired= %d\n", ha->host_no, __func__,
ha->firmware_state, ha->addl_fw_state,
timeout_count));
if (is_qla4032(ha) &&
!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
(timeout_count < ADAPTER_INIT_TOV - 5)) {
break;
}
msleep(1000);
} /* end of for */
if (timeout_count <= 0)
if (timeout_count == 0)
DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
ha->host_no, __func__));
@ -806,32 +812,6 @@ int qla4xxx_relogin_device(struct scsi_qla_host *ha,
return QLA_SUCCESS;
}
/**
* qla4010_get_topcat_presence - check if it is QLA4040 TopCat Chip
* @ha: Pointer to host adapter structure.
*
**/
static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
{
unsigned long flags;
uint16_t topcat;
if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
return QLA_ERROR;
spin_lock_irqsave(&ha->hardware_lock, flags);
topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
isp4010.topcat));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
else
clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
ql4xxx_unlock_nvram(ha);
return QLA_SUCCESS;
}
static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
{
unsigned long flags;
@ -866,7 +846,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
/* set defaults */
if (is_qla4010(ha))
extHwConfig.Asuint32_t = 0x1912;
else if (is_qla4022(ha))
else if (is_qla4022(ha) | is_qla4032(ha))
extHwConfig.Asuint32_t = 0x0023;
}
DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
@ -927,7 +907,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(jiffies, &ha->reg->mailbox[7]);
if (is_qla4022(ha))
if (is_qla4022(ha) | is_qla4032(ha))
writel(set_rmask(NVR_WRITE_ENABLE),
&ha->reg->u1.isp4022.nvram);
@ -978,7 +958,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
return status;
}
static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
{
#define QL4_LOCK_DRVR_WAIT 300
#define QL4_LOCK_DRVR_SLEEP 100
@ -1018,12 +998,7 @@ static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
int soft_reset = 1;
int config_chip = 0;
if (is_qla4010(ha)){
if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
return QLA_ERROR;
}
if (is_qla4022(ha))
if (is_qla4022(ha) | is_qla4032(ha))
ql4xxx_set_mac_number(ha);
if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)

View File

@ -38,7 +38,7 @@ qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
static inline void
__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
if (is_qla4022(ha)) {
if (is_qla4022(ha) | is_qla4032(ha)) {
writel(set_rmask(IMR_SCSI_INTR_ENABLE),
&ha->reg->u1.isp4022.intr_mask);
readl(&ha->reg->u1.isp4022.intr_mask);
@ -52,7 +52,7 @@ __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
static inline void
__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
{
if (is_qla4022(ha)) {
if (is_qla4022(ha) | is_qla4032(ha)) {
writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
&ha->reg->u1.isp4022.intr_mask);
readl(&ha->reg->u1.isp4022.intr_mask);

View File

@ -294,6 +294,12 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
cmd_entry->control_flags = CF_WRITE;
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
cmd_entry->control_flags = CF_READ;
ha->bytes_xfered += cmd->request_bufflen;
if (ha->bytes_xfered & ~0xFFFFF){
ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
ha->bytes_xfered &= 0xFFFFF;
}
}
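
The accounting added above keeps a 32-bit sub-MiB remainder and promotes whole MiB into the 64-bit total whenever any bit above bit 19 is set (~0xFFFFF masks off the low 20 bits). Worked through in userspace:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t total_mb = 0;
	uint32_t bytes = 0;
	uint32_t xfers[] = { 900 * 1024, 300 * 1024 };	/* two I/Os */

	for (int i = 0; i < 2; i++) {
		bytes += xfers[i];
		if (bytes & ~0xFFFFFu) {		/* >= 1 MiB pending */
			total_mb += bytes >> 20;	/* promote whole MiB */
			bytes &= 0xFFFFFu;		/* keep the remainder */
		}
	}
	printf("%llu MiB + %u bytes\n",
	       (unsigned long long)total_mb, (unsigned)bytes);
	/* 900K + 300K = 1200K, so this prints: 1 MiB + 180224 bytes */
	return 0;
}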
/* Set tagged queueing control flags */

View File

@ -627,6 +627,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isr_count++;
/*
* Repeatedly service interrupts up to a maximum of
* MAX_REQS_SERVICED_PER_INTR

View File

@ -7,15 +7,22 @@
#include "ql4_def.h"
static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
{
writel(cmd, isp_nvram(ha));
readl(isp_nvram(ha));
udelay(1);
}
static inline int eeprom_size(struct scsi_qla_host *ha)
{
return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16;
return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16;
}
static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
{
return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 :
FM93C56A_NO_ADDR_BITS_16;
return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 :
FM93C86A_NO_ADDR_BITS_16 ;
}
static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
@ -28,8 +35,7 @@ static int fm93c56a_select(struct scsi_qla_host * ha)
DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
writel(ha->eeprom_cmd_data, isp_nvram(ha));
readl(isp_nvram(ha));
eeprom_cmd(ha->eeprom_cmd_data, ha);
return 1;
}
@ -41,12 +47,13 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
int previousBit;
/* Clock in a zero, then do the start bit. */
writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
readl(isp_nvram(ha));
eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
AUBURN_EEPROM_CLK_RISE, ha);
eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
AUBURN_EEPROM_CLK_FALL, ha);
mask = 1 << (FM93C56A_CMD_BITS - 1);
/* Force the previous data bit to be different. */
@ -60,14 +67,14 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
* If the bit changed, then change the DO state to
* match.
*/
writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
previousBit = dataBit;
}
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
readl(isp_nvram(ha));
eeprom_cmd(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_RISE, ha);
eeprom_cmd(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_FALL, ha);
cmd = cmd << 1;
}
mask = 1 << (eeprom_no_addr_bits(ha) - 1);
@ -82,14 +89,15 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
* If the bit changed, then change the DO state to
* match.
*/
writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
previousBit = dataBit;
}
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
readl(isp_nvram(ha));
eeprom_cmd(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_RISE, ha);
eeprom_cmd(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_FALL, ha);
addr = addr << 1;
}
return 1;
@ -98,8 +106,7 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
static int fm93c56a_deselect(struct scsi_qla_host * ha)
{
ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
writel(ha->eeprom_cmd_data, isp_nvram(ha));
readl(isp_nvram(ha));
eeprom_cmd(ha->eeprom_cmd_data, ha);
return 1;
}
@ -112,12 +119,13 @@ static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
/* Read the data bits
* The first bit is a dummy. Clock right over it. */
for (i = 0; i < eeprom_no_data_bits(ha); i++) {
writel(ha->eeprom_cmd_data |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
dataBit =
(readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
eeprom_cmd(ha->eeprom_cmd_data |
AUBURN_EEPROM_CLK_RISE, ha);
eeprom_cmd(ha->eeprom_cmd_data |
AUBURN_EEPROM_CLK_FALL, ha);
dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
data = (data << 1) | dataBit;
}

View File

@ -134,9 +134,7 @@ struct eeprom_data {
u16 phyConfig; /* x36 */
#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
u16 topcat; /* x38 */
#define TOPCAT_PRESENT 0x0100
#define TOPCAT_MASK 0xFF00
u16 reserved_56; /* x38 */
#define EEPROM_UNUSED_1_SIZE 2
u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */

View File

@ -708,10 +708,10 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
}
/**
* qla4010_soft_reset - performs soft reset.
* qla4xxx_soft_reset - performs soft reset.
* @ha: Pointer to host adapter structure.
**/
static int qla4010_soft_reset(struct scsi_qla_host *ha)
int qla4xxx_soft_reset(struct scsi_qla_host *ha)
{
uint32_t max_wait_time;
unsigned long flags = 0;
@ -816,29 +816,6 @@ static int qla4010_soft_reset(struct scsi_qla_host *ha)
return status;
}
/**
* qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
* @ha: Pointer to host adapter structure.
**/
static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
{
unsigned long flags;
ql4xxx_lock_nvram(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
readl(isp_gp_out(ha));
mdelay(1);
writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
readl(isp_gp_out(ha));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mdelay(2523);
ql4xxx_unlock_nvram(ha);
return QLA_SUCCESS;
}
/**
* qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
* @ha: Pointer to host adapter structure.
@ -866,26 +843,6 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
}
/**
* qla4xxx_hard_reset - performs HBA Hard Reset
* @ha: Pointer to host adapter structure.
**/
static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
{
/* The QLA4010 really doesn't have an equivalent to a hard reset */
qla4xxx_flush_active_srbs(ha);
if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
int status = QLA_ERROR;
if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
(qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
(qla4010_soft_reset(ha) == QLA_SUCCESS))
status = QLA_SUCCESS;
return status;
} else
return qla4010_soft_reset(ha);
}
/**
* qla4xxx_recover_adapter - recovers adapter after a fatal error
* @ha: Pointer to host adapter structure.
@ -919,18 +876,11 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
if (status == QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
ha->host_no, __func__));
qla4xxx_flush_active_srbs(ha);
if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
status = qla4xxx_soft_reset(ha);
}
/* FIXMEkaren: Do we want to keep interrupts enabled and process
AENs after soft reset */
/* If firmware (SOFT) reset failed, or if all outstanding
* commands have not returned, then do a HARD reset.
*/
if (status == QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
ha->host_no, __func__));
status = qla4xxx_hard_reset(ha);
else
status = QLA_ERROR;
}
/* Flush any pending ddb changed AENs */
@ -1016,13 +966,9 @@ static void qla4xxx_do_dpc(void *data)
struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
struct ddb_entry *ddb_entry, *dtemp;
DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
ha->host_no, __func__));
DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
ha->host_no, __func__, ha->flags));
DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
ha->host_no, __func__, ha->dpc_flags));
DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
"flags = 0x%08lx, dpc_flags = 0x%08lx\n",
ha->host_no, __func__, ha->flags, ha->dpc_flags));
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
@ -1032,16 +978,8 @@ static void qla4xxx_do_dpc(void *data)
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags))
/*
* dg 09/23 Never initialize ddb list
* once we're up and running
* qla4xxx_recover_adapter(ha,
* REBUILD_DDB_LIST);
*/
qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags))
qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
@ -1122,6 +1060,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
destroy_workqueue(ha->dpc_thread);
/* Issue Soft Reset to put firmware in unknown state */
if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
qla4xxx_soft_reset(ha);
/* Remove timer thread, if present */
@ -1261,7 +1200,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
init_waitqueue_head(&ha->mailbox_wait_queue);
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->list_lock);
/* Allocate dma buffers */
if (qla4xxx_mem_alloc(ha)) {
@ -1467,27 +1405,6 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
return srb;
}
/**
* qla4xxx_soft_reset - performs a SOFT RESET of hba.
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_soft_reset(struct scsi_qla_host *ha)
{
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
__func__));
if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
int status = QLA_ERROR;
if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
(qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
(qla4010_soft_reset(ha) == QLA_SUCCESS) )
status = QLA_SUCCESS;
return status;
} else
return qla4010_soft_reset(ha);
}
/**
* qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
* @ha: actual ha whose done queue will contain the comd returned by firmware.
@ -1686,6 +1603,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP4032,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);

View File

@ -5,9 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.00.05b9-k"
#define QL4_DRIVER_MAJOR_VER 5
#define QL4_DRIVER_MINOR_VER 0
#define QL4_DRIVER_PATCH_VER 5
#define QL4_DRIVER_BETA_VER 9
#define QLA4XXX_DRIVER_VERSION "5.00.07-k"

View File

@ -156,8 +156,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
static DEFINE_MUTEX(host_cmd_pool_mutex);
static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
gfp_t gfp_mask)
struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
struct scsi_cmnd *cmd;
@ -178,6 +177,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
return cmd;
}
EXPORT_SYMBOL_GPL(__scsi_get_command);
/*
* Function: scsi_get_command()
@ -217,6 +217,26 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
}
EXPORT_SYMBOL(scsi_get_command);
void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
struct device *dev)
{
unsigned long flags;
/* changing locks here, don't need to restore the irq state */
spin_lock_irqsave(&shost->free_list_lock, flags);
if (unlikely(list_empty(&shost->free_list))) {
list_add(&cmd->list, &shost->free_list);
cmd = NULL;
}
spin_unlock_irqrestore(&shost->free_list_lock, flags);
if (likely(cmd != NULL))
kmem_cache_free(shost->cmd_pool->slab, cmd);
put_device(dev);
}
EXPORT_SYMBOL(__scsi_put_command);
/*
* Function: scsi_put_command()
*
@ -231,26 +251,15 @@ EXPORT_SYMBOL(scsi_get_command);
void scsi_put_command(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
unsigned long flags;
/* serious error if the command hasn't come from a device list */
spin_lock_irqsave(&cmd->device->list_lock, flags);
BUG_ON(list_empty(&cmd->list));
list_del_init(&cmd->list);
spin_unlock(&cmd->device->list_lock);
/* changing locks here, don't need to restore the irq state */
spin_lock(&shost->free_list_lock);
if (unlikely(list_empty(&shost->free_list))) {
list_add(&cmd->list, &shost->free_list);
cmd = NULL;
}
spin_unlock_irqrestore(&shost->free_list_lock, flags);
spin_unlock_irqrestore(&cmd->device->list_lock, flags);
if (likely(cmd != NULL))
kmem_cache_free(shost->cmd_pool->slab, cmd);
put_device(&sdev->sdev_gendev);
__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
@ -871,9 +880,9 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
#ifdef CONFIG_MODULE_UNLOAD
struct module *module = sdev->host->hostt->module;
#ifdef CONFIG_MODULE_UNLOAD
/* The module refcount will be zero if scsi_device_get()
* was called from a module removal routine */
if (module && module_refcount(module) != 0)

View File

@ -453,9 +453,18 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
}
/**
* scsi_send_eh_cmnd - send a cmd to a device as part of error recovery.
* @scmd: SCSI Cmd to send.
* @timeout: Timeout for cmd.
* scsi_send_eh_cmnd - submit a scsi command as part of error recovery
* @scmd: SCSI command structure to hijack
* @cmnd: CDB to send
* @cmnd_size: size in bytes of @cmnd
* @timeout: timeout for this request
* @copy_sense: request sense data if set to 1
*
* This function is used to send a scsi command down to a target device
* as part of the error recovery process. If @copy_sense is 0 the command
* sent must be one that does not transfer any data. If @copy_sense is 1
* the command must be REQUEST_SENSE and this function copies out the
* sense buffer it got into @scmd->sense_buffer.
*
* Return value:
* SUCCESS or FAILED or NEEDS_RETRY
@ -469,6 +478,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
DECLARE_COMPLETION_ONSTACK(done);
unsigned long timeleft;
unsigned long flags;
struct scatterlist sgl;
unsigned char old_cmnd[MAX_COMMAND_SIZE];
enum dma_data_direction old_data_direction;
unsigned short old_use_sg;
@ -500,19 +510,24 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
if (shost->hostt->unchecked_isa_dma)
gfp_mask |= __GFP_DMA;
scmd->sc_data_direction = DMA_FROM_DEVICE;
scmd->request_bufflen = 252;
scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
if (!scmd->request_buffer)
sgl.page = alloc_page(gfp_mask);
if (!sgl.page)
return FAILED;
sgl.offset = 0;
sgl.length = 252;
scmd->sc_data_direction = DMA_FROM_DEVICE;
scmd->request_bufflen = sgl.length;
scmd->request_buffer = &sgl;
scmd->use_sg = 1;
} else {
scmd->request_buffer = NULL;
scmd->request_bufflen = 0;
scmd->sc_data_direction = DMA_NONE;
scmd->use_sg = 0;
}
scmd->underflow = 0;
scmd->use_sg = 0;
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
if (sdev->scsi_level <= SCSI_2)
@ -583,7 +598,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
memcpy(scmd->sense_buffer, scmd->request_buffer,
sizeof(scmd->sense_buffer));
}
kfree(scmd->request_buffer);
__free_page(sgl.page);
}
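
The request-sense buffer moves from a kzalloc()ed flat buffer to a single page behind a one-entry scatterlist, because drivers that honor use_sg expect request_buffer to hold a struct scatterlist. In miniature, using the old-style scatterlist fields this tree still has:

#include <linux/mm.h>
#include <scsi/scsi_cmnd.h>

static int setup_sense_sg(struct scsi_cmnd *scmd, struct scatterlist *sgl,
			  gfp_t gfp_mask)
{
	sgl->page = alloc_page(gfp_mask);	/* backing store */
	if (!sgl->page)
		return -ENOMEM;
	sgl->offset = 0;
	sgl->length = 252;			/* sense bytes requested */

	scmd->sc_data_direction = DMA_FROM_DEVICE;
	scmd->request_buffer = sgl;
	scmd->request_bufflen = sgl->length;
	scmd->use_sg = 1;			/* one-entry table */
	return 0;
}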

View File

@ -704,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
return NULL;
}
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
struct scsi_host_sg_pool *sgp;
struct scatterlist *sgl;
@ -745,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
return sgl;
}
static void scsi_free_sgtable(struct scatterlist *sgl, int index)
EXPORT_SYMBOL(scsi_alloc_sgtable);
void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
struct scsi_host_sg_pool *sgp;
@ -755,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
mempool_free(sgl, sgp->pool);
}
EXPORT_SYMBOL(scsi_free_sgtable);
/*
* Function: scsi_release_buffers()
*
@ -996,25 +1000,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
int count;
/*
* if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
*/
if (blk_pc_request(req) && !req->bio) {
cmd->request_bufflen = req->data_len;
cmd->request_buffer = req->data;
req->buffer = req->data;
cmd->use_sg = 0;
return 0;
}
/*
* we used to not use scatter-gather for single segment request,
* We used to not use scatter-gather for single segment request,
* but now we do (it makes highmem I/O easier to support without
* kmapping pages)
*/
cmd->use_sg = req->nr_phys_segments;
/*
* if sg table allocation fails, requeue request later.
* If sg table allocation fails, requeue request later.
*/
sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
if (unlikely(!sgpnt)) {
@ -1022,24 +1015,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
return BLKPREP_DEFER;
}
req->buffer = NULL;
cmd->request_buffer = (char *) sgpnt;
cmd->request_bufflen = req->nr_sectors << 9;
if (blk_pc_request(req))
cmd->request_bufflen = req->data_len;
req->buffer = NULL;
else
cmd->request_bufflen = req->nr_sectors << 9;
/*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
/*
* mapped well, send it off
*/
if (likely(count <= cmd->use_sg)) {
cmd->use_sg = count;
return 0;
return BLKPREP_OK;
}
printk(KERN_ERR "Incorrect number of segments after building list\n");
@ -1069,6 +1059,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
return -EOPNOTSUPP;
}
static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
struct request *req)
{
struct scsi_cmnd *cmd;
if (!req->special) {
cmd = scsi_get_command(sdev, GFP_ATOMIC);
if (unlikely(!cmd))
return NULL;
req->special = cmd;
} else {
cmd = req->special;
}
/* pull a tag out of the request if we have one */
cmd->tag = req->tag;
cmd->request = req;
return cmd;
}
static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
{
BUG_ON(!blk_pc_request(cmd->request));
@ -1081,9 +1092,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
scsi_io_completion(cmd, cmd->request_bufflen);
}
static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
struct request *req = cmd->request;
struct scsi_cmnd *cmd;
cmd = scsi_get_cmd_from_req(sdev, req);
if (unlikely(!cmd))
return BLKPREP_DEFER;
/*
* BLOCK_PC requests may transfer data, in which case they must
* a bio attached to them. Or they might contain a SCSI command
* that does not transfer data, in which case they may optionally
* submit a request without an attached bio.
*/
if (req->bio) {
int ret;
BUG_ON(!req->nr_phys_segments);
ret = scsi_init_io(cmd);
if (unlikely(ret))
return ret;
} else {
BUG_ON(req->data_len);
BUG_ON(req->data);
cmd->request_bufflen = 0;
cmd->request_buffer = NULL;
cmd->use_sg = 0;
req->buffer = NULL;
}
BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@ -1099,154 +1138,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
cmd->allowed = req->retries;
cmd->timeout_per_command = req->timeout;
cmd->done = scsi_blk_pc_done;
return BLKPREP_OK;
}
/*
* Setup a REQ_TYPE_FS command. These are simple read/write request
* from filesystems that still need to be translated to SCSI CDBs from
* the ULD.
*/
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd;
struct scsi_driver *drv;
int ret;
/*
* Filesystem requests must transfer data.
*/
BUG_ON(!req->nr_phys_segments);
cmd = scsi_get_cmd_from_req(sdev, req);
if (unlikely(!cmd))
return BLKPREP_DEFER;
ret = scsi_init_io(cmd);
if (unlikely(ret))
return ret;
/*
* Initialize the actual SCSI command for this request.
*/
drv = *(struct scsi_driver **)req->rq_disk->private_data;
if (unlikely(!drv->init_command(cmd))) {
scsi_release_buffers(cmd);
scsi_put_command(cmd);
return BLKPREP_KILL;
}
return BLKPREP_OK;
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
struct scsi_cmnd *cmd;
int specials_only = 0;
int ret = BLKPREP_OK;
/*
* Just check to see if the device is online. If it isn't, we
* refuse to process any commands. The device must be brought
* online before trying any recovery commands
* If the device is not in running state we will reject some
* or all commands.
*/
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
switch (sdev->sdev_state) {
case SDEV_OFFLINE:
/*
* If the device is offline we refuse to process any
* commands. The device must be brought online
* before trying any recovery commands.
*/
if (unlikely(!scsi_device_online(sdev))) {
sdev_printk(KERN_ERR, sdev,
"rejecting I/O to offline device\n");
goto kill;
}
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
/* OK, we're not in a running state don't prep
* user commands */
if (sdev->sdev_state == SDEV_DEL) {
/* Device is fully deleted, no commands
* at all allowed down */
ret = BLKPREP_KILL;
break;
case SDEV_DEL:
/*
* If the device is fully deleted, we refuse to
* process any commands as well.
*/
sdev_printk(KERN_ERR, sdev,
"rejecting I/O to dead device\n");
goto kill;
}
/* OK, we only allow special commands (i.e. not
* user initiated ones */
specials_only = sdev->sdev_state;
}
ret = BLKPREP_KILL;
break;
case SDEV_QUIESCE:
case SDEV_BLOCK:
/*
* Find the actual device driver associated with this command.
* The SPECIAL requests are things like character device or
* ioctls, which did not originate from ll_rw_blk. Note that
* the special field is also used to indicate the cmd for
* the remainder of a partially fulfilled request that can
* come up when there is a medium error. We have to treat
* these two cases differently. We differentiate by looking
* at request->cmd, as this tells us the real story.
* If the device is blocked we defer normal commands.
*/
if (blk_special_request(req) && req->special)
cmd = req->special;
else if (blk_pc_request(req) || blk_fs_request(req)) {
if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
if (specials_only == SDEV_QUIESCE ||
specials_only == SDEV_BLOCK)
goto defer;
sdev_printk(KERN_ERR, sdev,
"rejecting I/O to device being removed\n");
goto kill;
if (!(req->cmd_flags & REQ_PREEMPT))
ret = BLKPREP_DEFER;
break;
default:
/*
* For any other not fully online state we only allow
* special commands. In particular any user initiated
* command is not allowed.
*/
if (!(req->cmd_flags & REQ_PREEMPT))
ret = BLKPREP_KILL;
break;
}
/*
* Now try and find a command block that we can use.
*/
if (!req->special) {
cmd = scsi_get_command(sdev, GFP_ATOMIC);
if (unlikely(!cmd))
goto defer;
} else
cmd = req->special;
/* pull a tag out of the request if we have one */
cmd->tag = req->tag;
} else {
blk_dump_rq_flags(req, "SCSI bad req");
goto kill;
if (ret != BLKPREP_OK)
goto out;
}
/* note the overloading of req->special. When the tag
* is active it always means cmd. If the tag goes
* back for re-queueing, it may be reset */
req->special = cmd;
cmd->request = req;
switch (req->cmd_type) {
case REQ_TYPE_BLOCK_PC:
ret = scsi_setup_blk_pc_cmnd(sdev, req);
break;
case REQ_TYPE_FS:
ret = scsi_setup_fs_cmnd(sdev, req);
break;
default:
/*
* FIXME: drop the lock here because the functions below
* expect to be called without the queue lock held. Also,
* previously, we dequeued the request before dropping the
* lock. We hope REQ_STARTED prevents anything untoward from
* happening now.
*/
if (blk_fs_request(req) || blk_pc_request(req)) {
int ret;
/*
* This will do a couple of things:
* 1) Fill in the actual SCSI command.
* 2) Fill in any other upper-level specific fields
* (timeout).
* All other command types are not supported.
*
* If this returns 0, it means that the request failed
* (reading past end of disk, reading offline device,
* etc). This won't actually talk to the device, but
* some kinds of consistency checking may cause the
* request to be rejected immediately.
* Note that these days the SCSI subsystem does not use
* REQ_TYPE_SPECIAL requests anymore. These are only used
* (directly or via blk_insert_request) by non-SCSI drivers.
*/
blk_dump_rq_flags(req, "SCSI bad req");
ret = BLKPREP_KILL;
break;
}
/*
* This sets up the scatter-gather table (allocating if
* required).
*/
ret = scsi_init_io(cmd);
out:
switch (ret) {
/* For BLKPREP_KILL/DEFER the cmd was released */
case BLKPREP_KILL:
goto kill;
req->errors = DID_NO_CONNECT << 16;
break;
case BLKPREP_DEFER:
goto defer;
}
/*
* Initialize the actual SCSI command for this request.
*/
if (blk_pc_request(req)) {
scsi_setup_blk_pc_cmnd(cmd);
} else if (req->rq_disk) {
struct scsi_driver *drv;
drv = *(struct scsi_driver **)req->rq_disk->private_data;
if (unlikely(!drv->init_command(cmd))) {
scsi_release_buffers(cmd);
scsi_put_command(cmd);
goto kill;
}
}
}
/*
* The request is now prepped, no need to come back here
*/
req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
defer:
/* If we defer, the elv_next_request() returns NULL, but the
* If we defer, the elv_next_request() returns NULL, but the
* queue must be restarted, so we plug here if no returning
* command will automatically do that. */
* command will automatically do that.
*/
if (sdev->device_busy == 0)
blk_plug_device(q);
return BLKPREP_DEFER;
kill:
req->errors = DID_NO_CONNECT << 16;
return BLKPREP_KILL;
break;
default:
req->cmd_flags |= REQ_DONTPREP;
}
return ret;
}
/*
@ -1548,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
request_fn_proc *request_fn)
{
struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
q = blk_init_queue(scsi_request_fn, NULL);
q = blk_init_queue(request_fn, NULL);
if (!q)
return NULL;
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_max_hw_segments(q, shost->sg_tablesize);
blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
blk_queue_max_sectors(q, shost->max_sectors);
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
struct request_queue *q;
q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
if (!q)
return NULL;
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
return q;
}
void scsi_free_queue(struct request_queue *q)
{

drivers/scsi/scsi_priv.h

@ -39,6 +39,9 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{ };
#endif
/* scsi_scan.c */
int scsi_complete_async_scans(void);
/* scsi_devinfo.c */
extern int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,

drivers/scsi/scsi_scan.c

@ -29,7 +29,9 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <asm/semaphore.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@ -87,6 +89,17 @@ module_param_named(max_luns, max_scsi_luns, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
"last scsi LUN (should be between 1 and 2^32-1)");
#ifdef CONFIG_SCSI_SCAN_ASYNC
#define SCSI_SCAN_TYPE_DEFAULT "async"
#else
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
MODULE_PARM_DESC(scan, "sync, async or none");
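
/*
 * Usage note (a sketch of the expected invocations, matching the
 * module_param_string() above): the option lives in scsi_mod, so it can
 * be given either on the kernel command line or at module load time,
 * e.g. "scsi_mod.scan=async" when scsi_mod is built in, or
 * "modprobe scsi_mod scan=none" when it is modular.
 */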
/*
* max_scsi_report_luns: the maximum number of LUNS that will be
* returned from the REPORT LUNS command. 8 times this value must
@ -108,6 +121,68 @@ MODULE_PARM_DESC(inq_timeout,
"Timeout (in seconds) waiting for devices to answer INQUIRY."
" Default is 5. Some non-compliant devices need more.");
static DEFINE_SPINLOCK(async_scan_lock);
static LIST_HEAD(scanning_hosts);
struct async_scan_data {
struct list_head list;
struct Scsi_Host *shost;
struct completion prev_finished;
};
/**
* scsi_complete_async_scans - Wait for asynchronous scans to complete
*
* Asynchronous scans add themselves to the scanning_hosts list. Once
* that list is empty, we know that the scans are complete. Rather than
* waking up periodically to check the state of the list, we pretend to be
* a scanning task by adding ourselves at the end of the list and going to
* sleep. When the task before us wakes us up, we take ourselves off the
* list and return.
*/
int scsi_complete_async_scans(void)
{
struct async_scan_data *data;
do {
if (list_empty(&scanning_hosts))
return 0;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
*/
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
msleep(1);
} while (!data);
data->shost = NULL;
init_completion(&data->prev_finished);
spin_lock(&async_scan_lock);
/* Check that there's still somebody else on the list */
if (list_empty(&scanning_hosts))
goto done;
list_add_tail(&data->list, &scanning_hosts);
spin_unlock(&async_scan_lock);
printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
wait_for_completion(&data->prev_finished);
spin_lock(&async_scan_lock);
list_del(&data->list);
done:
spin_unlock(&async_scan_lock);
kfree(data);
return 0;
}
#ifdef MODULE
/* Only exported for the benefit of scsi_wait_scan */
EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
#endif
/**
* scsi_unlock_floptical - unlock device via a special MODE SENSE command
* @sdev: scsi device to send command to
@ -619,7 +694,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
int *bflags)
int *bflags, int async)
{
/*
* XXX do not save the inquiry, since it can change underneath us,
@ -805,7 +880,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
* register it and tell the rest of the kernel
* about it.
*/
if (scsi_sysfs_add_sdev(sdev) != 0)
if (!async && scsi_sysfs_add_sdev(sdev) != 0)
return SCSI_SCAN_NO_RESPONSE;
return SCSI_SCAN_LUN_PRESENT;
@ -974,7 +1049,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
goto out_free_result;
}
res = scsi_add_lun(sdev, result, &bflags);
res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
if (res == SCSI_SCAN_LUN_PRESENT) {
if (bflags & BLIST_KEY) {
sdev->lockable = 0;
@ -1474,6 +1549,12 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
{
struct Scsi_Host *shost = dev_to_shost(parent);
if (strncmp(scsi_scan_type, "none", 4) == 0)
return;
if (!shost->async_scan)
scsi_complete_async_scans();
mutex_lock(&shost->scan_mutex);
if (scsi_host_scan_allowed(shost))
__scsi_scan_target(parent, channel, id, lun, rescan);
@ -1519,6 +1600,9 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
"%s: <%u:%u:%u>\n",
__FUNCTION__, channel, id, lun));
if (!shost->async_scan)
scsi_complete_async_scans();
if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
@ -1539,14 +1623,143 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
return 0;
}
static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
shost_for_each_device(sdev, shost) {
if (scsi_sysfs_add_sdev(sdev) != 0)
scsi_destroy_sdev(sdev);
}
}
/**
* scsi_prep_async_scan - prepare for an async scan
* @shost: the host which will be scanned
* Returns: a cookie to be passed to scsi_finish_async_scan()
*
* Tells the midlayer this host is going to do an asynchronous scan.
* It reserves the host's position in the scanning list and ensures
* that other asynchronous scans started after this one won't affect the
* ordering of the discovered devices.
*/
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
struct async_scan_data *data;
if (strncmp(scsi_scan_type, "sync", 4) == 0)
return NULL;
if (shost->async_scan) {
printk("%s called twice for host %d", __FUNCTION__,
shost->host_no);
dump_stack();
return NULL;
}
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto err;
data->shost = scsi_host_get(shost);
if (!data->shost)
goto err;
init_completion(&data->prev_finished);
spin_lock(&async_scan_lock);
shost->async_scan = 1;
if (list_empty(&scanning_hosts))
complete(&data->prev_finished);
list_add_tail(&data->list, &scanning_hosts);
spin_unlock(&async_scan_lock);
return data;
err:
kfree(data);
return NULL;
}
/**
* scsi_finish_async_scan - asynchronous scan has finished
* @data: cookie returned from earlier call to scsi_prep_async_scan()
*
* All the devices currently attached to this host have been found.
* This function announces all the devices it has found to the rest
* of the system.
*/
static void scsi_finish_async_scan(struct async_scan_data *data)
{
struct Scsi_Host *shost;
if (!data)
return;
shost = data->shost;
if (!shost->async_scan) {
printk("%s called twice for host %d", __FUNCTION__,
shost->host_no);
dump_stack();
return;
}
wait_for_completion(&data->prev_finished);
scsi_sysfs_add_devices(shost);
spin_lock(&async_scan_lock);
shost->async_scan = 0;
list_del(&data->list);
if (!list_empty(&scanning_hosts)) {
struct async_scan_data *next = list_entry(scanning_hosts.next,
struct async_scan_data, list);
complete(&next->prev_finished);
}
spin_unlock(&async_scan_lock);
scsi_host_put(shost);
kfree(data);
}
static void do_scsi_scan_host(struct Scsi_Host *shost)
{
if (shost->hostt->scan_finished) {
unsigned long start = jiffies;
if (shost->hostt->scan_start)
shost->hostt->scan_start(shost);
while (!shost->hostt->scan_finished(shost, jiffies - start))
msleep(10);
} else {
scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
SCAN_WILD_CARD, 0);
}
}
static int do_scan_async(void *_data)
{
struct async_scan_data *data = _data;
do_scsi_scan_host(data->shost);
scsi_finish_async_scan(data);
return 0;
}
/**
* scsi_scan_host - scan the given adapter
* @shost: adapter to scan
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
SCAN_WILD_CARD, 0);
struct async_scan_data *data;
if (strncmp(scsi_scan_type, "none", 4) == 0)
return;
data = scsi_prep_async_scan(shost);
if (!data) {
do_scsi_scan_host(shost);
return;
}
kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
}
EXPORT_SYMBOL(scsi_scan_host);
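
/*
 * A sketch of the resulting LLD boot flow (hypothetical driver; names
 * other than the scsi_* calls are illustrative).  With scan=async the
 * scsi_scan_host() call below returns immediately and the bus walk runs
 * in the scsi_scan_%d kthread; with scan=sync it behaves as before:
 *
 *	shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
 *	...
 *	if (scsi_add_host(shost, &pdev->dev))
 *		goto fail;
 *	scsi_scan_host(shost);	// may return before scanning finishes
 */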

drivers/scsi/scsi_tgt_if.c (new file, 352 lines)

@ -0,0 +1,352 @@
/*
* SCSI target kernel/user interface functions
*
* Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
* Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/miscdevice.h>
#include <linux/file.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <scsi/scsi_tgt_if.h>
#include <asm/cacheflush.h>
#include "scsi_tgt_priv.h"
struct tgt_ring {
u32 tr_idx;
unsigned long tr_pages[TGT_RING_PAGES];
spinlock_t tr_lock;
};
/* tx_ring : kernel->user, rx_ring : user->kernel */
static struct tgt_ring tx_ring, rx_ring;
static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
{
if (ring->tr_idx == TGT_MAX_EVENTS - 1)
ring->tr_idx = 0;
else
ring->tr_idx++;
}
static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
{
u32 pidx, off;
pidx = idx / TGT_EVENT_PER_PAGE;
off = idx % TGT_EVENT_PER_PAGE;
return (struct tgt_event *)
(ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
}
static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
{
struct tgt_event *ev;
struct tgt_ring *ring = &tx_ring;
unsigned long flags;
int err = 0;
spin_lock_irqsave(&ring->tr_lock, flags);
ev = tgt_head_event(ring, ring->tr_idx);
if (!ev->hdr.status)
tgt_ring_idx_inc(ring);
else
		err = -EBUSY;
spin_unlock_irqrestore(&ring->tr_lock, flags);
if (err)
return err;
memcpy(ev, p, sizeof(*ev));
ev->hdr.type = type;
mb();
ev->hdr.status = 1;
flush_dcache_page(virt_to_page(ev));
wake_up_interruptible(&tgt_poll_wait);
return 0;
}
int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
{
struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
struct tgt_event ev;
int err;
memset(&ev, 0, sizeof(ev));
ev.p.cmd_req.host_no = shost->host_no;
ev.p.cmd_req.data_len = cmd->request_bufflen;
memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
ev.p.cmd_req.attribute = cmd->tag;
ev.p.cmd_req.tag = tag;
dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
ev.p.cmd_req.data_len, cmd->tag,
(unsigned long long) ev.p.cmd_req.tag);
err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
if (err)
eprintk("tx buf is full, could not send\n");
return err;
}
int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag)
{
struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
struct tgt_event ev;
int err;
memset(&ev, 0, sizeof(ev));
ev.p.cmd_done.host_no = shost->host_no;
ev.p.cmd_done.tag = tag;
ev.p.cmd_done.result = cmd->result;
dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
(unsigned long long) ev.p.cmd_req.tag,
ev.p.cmd_req.data_len, cmd->tag);
err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
if (err)
eprintk("tx buf is full, could not send\n");
return err;
}
int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
struct scsi_lun *scsilun, void *data)
{
struct tgt_event ev;
int err;
memset(&ev, 0, sizeof(ev));
ev.p.tsk_mgmt_req.host_no = host_no;
ev.p.tsk_mgmt_req.function = function;
ev.p.tsk_mgmt_req.tag = tag;
memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;
dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
(unsigned long long) ev.p.tsk_mgmt_req.mid);
err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
if (err)
eprintk("tx buf is full, could not send\n");
return err;
}
static int event_recv_msg(struct tgt_event *ev)
{
int err = 0;
switch (ev->hdr.type) {
case TGT_UEVENT_CMD_RSP:
err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
ev->p.cmd_rsp.tag,
ev->p.cmd_rsp.result,
ev->p.cmd_rsp.len,
ev->p.cmd_rsp.uaddr,
ev->p.cmd_rsp.rw);
break;
case TGT_UEVENT_TSK_MGMT_RSP:
err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
ev->p.tsk_mgmt_rsp.mid,
ev->p.tsk_mgmt_rsp.result);
break;
default:
eprintk("unknown type %d\n", ev->hdr.type);
err = -EINVAL;
}
return err;
}
static ssize_t tgt_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
struct tgt_event *ev;
struct tgt_ring *ring = &rx_ring;
while (1) {
ev = tgt_head_event(ring, ring->tr_idx);
/* do we need this? */
flush_dcache_page(virt_to_page(ev));
if (!ev->hdr.status)
break;
tgt_ring_idx_inc(ring);
event_recv_msg(ev);
ev->hdr.status = 0;
};
return count;
}
static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
{
struct tgt_event *ev;
struct tgt_ring *ring = &tx_ring;
unsigned long flags;
unsigned int mask = 0;
u32 idx;
poll_wait(file, &tgt_poll_wait, wait);
spin_lock_irqsave(&ring->tr_lock, flags);
idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
ev = tgt_head_event(ring, idx);
if (ev->hdr.status)
mask |= POLLIN | POLLRDNORM;
spin_unlock_irqrestore(&ring->tr_lock, flags);
return mask;
}
static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
struct tgt_ring *ring)
{
int i, err;
for (i = 0; i < TGT_RING_PAGES; i++) {
struct page *page = virt_to_page(ring->tr_pages[i]);
err = vm_insert_page(vma, addr, page);
if (err)
return err;
addr += PAGE_SIZE;
}
return 0;
}
static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long addr;
int err;
if (vma->vm_pgoff)
return -EINVAL;
if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
eprintk("mmap size must be %lu, not %lu \n",
TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
return -EINVAL;
}
addr = vma->vm_start;
err = uspace_ring_map(vma, addr, &tx_ring);
if (err)
return err;
err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);
return err;
}
static int tgt_open(struct inode *inode, struct file *file)
{
tx_ring.tr_idx = rx_ring.tr_idx = 0;
return 0;
}
static struct file_operations tgt_fops = {
.owner = THIS_MODULE,
.open = tgt_open,
.poll = tgt_poll,
.write = tgt_write,
.mmap = tgt_mmap,
};
static struct miscdevice tgt_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "tgt",
.fops = &tgt_fops,
};
static void tgt_ring_exit(struct tgt_ring *ring)
{
int i;
for (i = 0; i < TGT_RING_PAGES; i++)
free_page(ring->tr_pages[i]);
}
static int tgt_ring_init(struct tgt_ring *ring)
{
int i;
spin_lock_init(&ring->tr_lock);
for (i = 0; i < TGT_RING_PAGES; i++) {
ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
if (!ring->tr_pages[i]) {
eprintk("out of memory\n");
return -ENOMEM;
}
}
return 0;
}
void scsi_tgt_if_exit(void)
{
tgt_ring_exit(&tx_ring);
tgt_ring_exit(&rx_ring);
misc_deregister(&tgt_miscdev);
}
int scsi_tgt_if_init(void)
{
int err;
err = tgt_ring_init(&tx_ring);
if (err)
return err;
err = tgt_ring_init(&rx_ring);
if (err)
goto free_tx_ring;
err = misc_register(&tgt_miscdev);
if (err)
goto free_rx_ring;
return 0;
free_rx_ring:
tgt_ring_exit(&rx_ring);
free_tx_ring:
tgt_ring_exit(&tx_ring);
return err;
}

drivers/scsi/scsi_tgt_lib.c (new file, 742 lines)

@ -0,0 +1,742 @@
/*
* SCSI target lib functions
*
* Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
* Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <../drivers/md/dm-bio-list.h>
#include "scsi_tgt_priv.h"
static struct workqueue_struct *scsi_tgtd;
static kmem_cache_t *scsi_tgt_cmd_cache;
/*
* TODO: this struct will be killed when the block layer supports large bios
* and James's work struct code is in
*/
struct scsi_tgt_cmd {
/* TODO replace work with James b's code */
struct work_struct work;
/* TODO replace the lists with a large bio */
struct bio_list xfer_done_list;
struct bio_list xfer_list;
struct list_head hash_list;
struct request *rq;
u64 tag;
void *buffer;
unsigned bufflen;
};
#define TGT_HASH_ORDER 4
#define cmd_hashfn(tag) hash_long((unsigned long) (tag), TGT_HASH_ORDER)
struct scsi_tgt_queuedata {
struct Scsi_Host *shost;
struct list_head cmd_hash[1 << TGT_HASH_ORDER];
spinlock_t cmd_hash_lock;
};
/*
* Function: scsi_host_get_command()
*
* Purpose: Allocate and setup a scsi command block and blk request
*
* Arguments: shost - scsi host
* data_dir - dma data dir
* gfp_mask- allocator flags
*
* Returns: The allocated scsi command structure.
*
* This should be called by target LLDs to get a command.
*/
struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
enum dma_data_direction data_dir,
gfp_t gfp_mask)
{
int write = (data_dir == DMA_TO_DEVICE);
struct request *rq;
struct scsi_cmnd *cmd;
struct scsi_tgt_cmd *tcmd;
/* Bail if we can't get a reference to the device */
if (!get_device(&shost->shost_gendev))
return NULL;
tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
if (!tcmd)
goto put_dev;
rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
if (!rq)
goto free_tcmd;
cmd = __scsi_get_command(shost, gfp_mask);
if (!cmd)
goto release_rq;
memset(cmd, 0, sizeof(*cmd));
cmd->sc_data_direction = data_dir;
cmd->jiffies_at_alloc = jiffies;
cmd->request = rq;
rq->special = cmd;
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
rq->end_io_data = tcmd;
bio_list_init(&tcmd->xfer_list);
bio_list_init(&tcmd->xfer_done_list);
tcmd->rq = rq;
return cmd;
release_rq:
blk_put_request(rq);
free_tcmd:
kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
put_dev:
put_device(&shost->shost_gendev);
return NULL;
}
EXPORT_SYMBOL_GPL(scsi_host_get_command);
/*
* Function: scsi_host_put_command()
*
* Purpose: Free a scsi command block
*
* Arguments: shost - scsi host
* cmd - command block to free
*
* Returns: Nothing.
*
* Notes: The command must not belong to any lists.
*/
void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct request_queue *q = shost->uspace_req_q;
struct request *rq = cmd->request;
struct scsi_tgt_cmd *tcmd = rq->end_io_data;
unsigned long flags;
kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
spin_lock_irqsave(q->queue_lock, flags);
__blk_put_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
__scsi_put_command(shost, cmd, &shost->shost_gendev);
}
EXPORT_SYMBOL_GPL(scsi_host_put_command);
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
{
struct bio *bio;
/* must call bio_endio in case bio was bounced */
while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
bio_endio(bio, bio->bi_size, 0);
bio_unmap_user(bio);
}
while ((bio = bio_list_pop(&tcmd->xfer_list))) {
bio_endio(bio, bio->bi_size, 0);
bio_unmap_user(bio);
}
}
static void cmd_hashlist_del(struct scsi_cmnd *cmd)
{
struct request_queue *q = cmd->request->q;
struct scsi_tgt_queuedata *qdata = q->queuedata;
unsigned long flags;
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
list_del(&tcmd->hash_list);
spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
}
static void scsi_tgt_cmd_destroy(void *data)
{
struct scsi_cmnd *cmd = data;
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
rq_data_dir(cmd->request));
/*
 * We fix rq->cmd_flags here since, when we told bio_map_user
 * to write to the vm for WRITE commands, blk_rq_bio_prep set
 * the flags (rq_data_dir) to READ.
*/
if (cmd->sc_data_direction == DMA_TO_DEVICE)
cmd->request->cmd_flags |= REQ_RW;
else
cmd->request->cmd_flags &= ~REQ_RW;
scsi_unmap_user_pages(tcmd);
scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
}
static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
u64 tag)
{
struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
unsigned long flags;
struct list_head *head;
tcmd->tag = tag;
spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
head = &qdata->cmd_hash[cmd_hashfn(tag)];
list_add(&tcmd->hash_list, head);
spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
}
/*
* scsi_tgt_alloc_queue - setup queue used for message passing
* shost: scsi host
*
* This should be called by the LLD after host allocation.
* And will be released when the host is released.
*/
int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
{
struct scsi_tgt_queuedata *queuedata;
struct request_queue *q;
int err, i;
/*
* Do we need to send a netlink event or should uspace
* just respond to the hotplug event?
*/
q = __scsi_alloc_queue(shost, NULL);
if (!q)
return -ENOMEM;
queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
if (!queuedata) {
err = -ENOMEM;
goto cleanup_queue;
}
queuedata->shost = shost;
q->queuedata = queuedata;
/*
* this is a silly hack. We should probably just queue as many
 * commands as are received to userspace. uspace can then make
* sure we do not overload the HBA
*/
q->nr_requests = shost->hostt->can_queue;
/*
* We currently only support software LLDs so this does
* not matter for now. Do we need this for the cards we support?
* If so we should make it a host template value.
*/
blk_queue_dma_alignment(q, 0);
shost->uspace_req_q = q;
for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
spin_lock_init(&queuedata->cmd_hash_lock);
return 0;
cleanup_queue:
blk_cleanup_queue(q);
return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
void scsi_tgt_free_queue(struct Scsi_Host *shost)
{
int i;
unsigned long flags;
struct request_queue *q = shost->uspace_req_q;
struct scsi_cmnd *cmd;
struct scsi_tgt_queuedata *qdata = q->queuedata;
struct scsi_tgt_cmd *tcmd, *n;
LIST_HEAD(cmds);
spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
hash_list) {
list_del(&tcmd->hash_list);
list_add(&tcmd->hash_list, &cmds);
}
}
spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
while (!list_empty(&cmds)) {
tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
list_del(&tcmd->hash_list);
cmd = tcmd->rq->special;
shost->hostt->eh_abort_handler(cmd);
scsi_tgt_cmd_destroy(cmd);
}
}
EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
{
struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
return queue->shost;
}
EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
/*
* scsi_tgt_queue_command - queue command for userspace processing
* @cmd: scsi command
* @scsilun: scsi lun
* @tag: unique value to identify this command for tmf
*/
int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
u64 tag)
{
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
int err;
init_scsi_tgt_cmd(cmd->request, tcmd, tag);
err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
if (err)
cmd_hashlist_del(cmd);
return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
/*
 * This is normally run from an interrupt handler and the unmap
 * needs process context, so we must queue the work
*/
static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
{
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
scsi_tgt_uspace_send_status(cmd, tcmd->tag);
INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy, cmd);
queue_work(scsi_tgtd, &tcmd->work);
}
static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
{
struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
int err;
dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
switch (err) {
case SCSI_MLQUEUE_HOST_BUSY:
case SCSI_MLQUEUE_DEVICE_BUSY:
return -EAGAIN;
}
return 0;
}
static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
{
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
int err;
err = __scsi_tgt_transfer_response(cmd);
if (!err)
return;
cmd->result = DID_BUS_BUSY << 16;
err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
if (err <= 0)
/* the eh will have to pick this up */
printk(KERN_ERR "Could not send cmd %p status\n", cmd);
}
static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
struct request *rq = cmd->request;
struct scsi_tgt_cmd *tcmd = rq->end_io_data;
int count;
cmd->use_sg = rq->nr_phys_segments;
cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
if (!cmd->request_buffer)
return -ENOMEM;
cmd->request_bufflen = rq->data_len;
dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
rq_data_dir(rq));
count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
if (likely(count <= cmd->use_sg)) {
cmd->use_sg = count;
return 0;
}
eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
return -EINVAL;
}
/* TODO: test this crap and replace bio_map_user with new interface maybe */
static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
int rw)
{
struct request_queue *q = cmd->request->q;
struct request *rq = cmd->request;
void *uaddr = tcmd->buffer;
unsigned int len = tcmd->bufflen;
struct bio *bio;
int err;
while (len > 0) {
dprintk("%lx %u\n", (unsigned long) uaddr, len);
bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
if (IS_ERR(bio)) {
err = PTR_ERR(bio);
dprintk("fail to map %lx %u %d %x\n",
(unsigned long) uaddr, len, err, cmd->cmnd[0]);
goto unmap_bios;
}
uaddr += bio->bi_size;
len -= bio->bi_size;
/*
* The first bio is added and merged. We could probably
* try to add others using scsi_merge_bio() but for now
* we keep it simple. The first bio should be pretty large
* (either hitting the 1 MB bio pages limit or a queue limit)
* already but for really large IO we may want to try and
* merge these.
*/
if (!rq->bio) {
blk_rq_bio_prep(q, rq, bio);
rq->data_len = bio->bi_size;
} else
/* put list of bios to transfer in next go around */
bio_list_add(&tcmd->xfer_list, bio);
}
cmd->offset = 0;
err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
if (err)
goto unmap_bios;
return 0;
unmap_bios:
if (rq->bio) {
bio_unmap_user(rq->bio);
while ((bio = bio_list_pop(&tcmd->xfer_list)))
bio_unmap_user(bio);
}
return err;
}
static int scsi_tgt_transfer_data(struct scsi_cmnd *);
static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
{
struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
struct bio *bio;
int err;
/* should we free resources here on error ? */
if (cmd->result) {
send_uspace_err:
err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
if (err <= 0)
/* the tgt uspace eh will have to pick this up */
printk(KERN_ERR "Could not send cmd %p status\n", cmd);
return;
}
dprintk("cmd %p request_bufflen %u bufflen %u\n",
cmd, cmd->request_bufflen, tcmd->bufflen);
scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
tcmd->buffer += cmd->request_bufflen;
cmd->offset += cmd->request_bufflen;
if (!tcmd->xfer_list.head) {
scsi_tgt_transfer_response(cmd);
return;
}
dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
cmd, cmd->request_bufflen, tcmd->bufflen);
bio = bio_list_pop(&tcmd->xfer_list);
BUG_ON(!bio);
blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
cmd->request->data_len = bio->bi_size;
err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
if (err) {
cmd->result = DID_ERROR << 16;
goto send_uspace_err;
}
if (scsi_tgt_transfer_data(cmd)) {
cmd->result = DID_NO_CONNECT << 16;
goto send_uspace_err;
}
}
static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
{
int err;
struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
switch (err) {
case SCSI_MLQUEUE_HOST_BUSY:
case SCSI_MLQUEUE_DEVICE_BUSY:
return -EAGAIN;
default:
return 0;
}
}
static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
unsigned len)
{
char __user *p = (char __user *) uaddr;
if (copy_from_user(cmd->sense_buffer, p,
min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
printk(KERN_ERR "Could not copy the sense buffer\n");
return -EIO;
}
return 0;
}
static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
int err;
err = shost->hostt->eh_abort_handler(cmd);
if (err)
eprintk("fail to abort %p\n", cmd);
scsi_tgt_cmd_destroy(cmd);
return err;
}
static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
{
struct scsi_tgt_queuedata *qdata = q->queuedata;
struct request *rq = NULL;
struct list_head *head;
struct scsi_tgt_cmd *tcmd;
unsigned long flags;
head = &qdata->cmd_hash[cmd_hashfn(tag)];
spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
list_for_each_entry(tcmd, head, hash_list) {
if (tcmd->tag == tag) {
rq = tcmd->rq;
list_del(&tcmd->hash_list);
break;
}
}
spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
return rq;
}
int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
unsigned long uaddr, u8 rw)
{
struct Scsi_Host *shost;
struct scsi_cmnd *cmd;
struct request *rq;
struct scsi_tgt_cmd *tcmd;
int err = 0;
dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
result, len, uaddr, rw);
/* TODO: replace with a O(1) alg */
shost = scsi_host_lookup(host_no);
if (IS_ERR(shost)) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return -EINVAL;
}
if (!shost->uspace_req_q) {
printk(KERN_ERR "Not target scsi host %d\n", host_no);
goto done;
}
rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
if (!rq) {
printk(KERN_ERR "Could not find tag %llu\n",
(unsigned long long) tag);
err = -EINVAL;
goto done;
}
cmd = rq->special;
dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
if (result == TASK_ABORTED) {
scsi_tgt_abort_cmd(shost, cmd);
goto done;
}
/*
* store the userspace values here, the working values are
* in the request_* values
*/
tcmd = cmd->request->end_io_data;
tcmd->buffer = (void *)uaddr;
tcmd->bufflen = len;
cmd->result = result;
if (!tcmd->bufflen || cmd->request_buffer) {
err = __scsi_tgt_transfer_response(cmd);
goto done;
}
/*
* TODO: Do we need to handle case where request does not
* align with LLD.
*/
err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
if (err) {
eprintk("%p %d\n", cmd, err);
err = -EAGAIN;
goto done;
}
/* userspace failure */
if (cmd->result) {
if (status_byte(cmd->result) == CHECK_CONDITION)
scsi_tgt_copy_sense(cmd, uaddr, len);
err = __scsi_tgt_transfer_response(cmd);
goto done;
}
/* ask the target LLD to transfer the data to the buffer */
err = scsi_tgt_transfer_data(cmd);
done:
scsi_host_put(shost);
return err;
}
int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
struct scsi_lun *scsilun, void *data)
{
int err;
/* TODO: need to retry if this fails. */
err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
tag, scsilun, data);
if (err < 0)
eprintk("The task management request lost!\n");
return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
{
struct Scsi_Host *shost;
int err = -EINVAL;
dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
shost = scsi_host_lookup(host_no);
if (IS_ERR(shost)) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return err;
}
if (!shost->uspace_req_q) {
printk(KERN_ERR "Not target scsi host %d\n", host_no);
goto done;
}
err = shost->hostt->tsk_mgmt_response(mid, result);
done:
scsi_host_put(shost);
return err;
}
static int __init scsi_tgt_init(void)
{
int err;
scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
sizeof(struct scsi_tgt_cmd),
0, 0, NULL, NULL);
if (!scsi_tgt_cmd_cache)
return -ENOMEM;
scsi_tgtd = create_workqueue("scsi_tgtd");
if (!scsi_tgtd) {
err = -ENOMEM;
goto free_kmemcache;
}
err = scsi_tgt_if_init();
if (err)
goto destroy_wq;
return 0;
destroy_wq:
destroy_workqueue(scsi_tgtd);
free_kmemcache:
kmem_cache_destroy(scsi_tgt_cmd_cache);
return err;
}
static void __exit scsi_tgt_exit(void)
{
destroy_workqueue(scsi_tgtd);
scsi_tgt_if_exit();
kmem_cache_destroy(scsi_tgt_cmd_cache);
}
module_init(scsi_tgt_init);
module_exit(scsi_tgt_exit);
MODULE_DESCRIPTION("SCSI target core");
MODULE_LICENSE("GPL");

drivers/scsi/scsi_tgt_priv.h (new file)

@ -0,0 +1,25 @@
struct scsi_cmnd;
struct scsi_lun;
struct Scsi_Host;
struct task_struct;
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
} while (0)
#define dprintk(fmt, args...)
/* #define dprintk eprintk */
extern void scsi_tgt_if_exit(void);
extern int scsi_tgt_if_init(void);
extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
u64 tag);
extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
unsigned long uaddr, u8 rw);
extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
struct scsi_lun *scsilun, void *data);
extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);

drivers/scsi/scsi_wait_scan.c (new file)

@ -0,0 +1,31 @@
/*
* scsi_wait_scan.c
*
* Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
*
* This is a simple module to wait until all the async scans are
* complete. The idea is to use it in initrd/initramfs scripts. You
* modprobe it after all the modprobes of the root SCSI drivers and it
* will wait until they have all finished scanning their busses before
* allowing the boot to proceed
*/
#include <linux/module.h>
#include "scsi_priv.h"
static int __init wait_scan_init(void)
{
scsi_complete_async_scans();
return 0;
}
static void __exit wait_scan_exit(void)
{
}
MODULE_DESCRIPTION("SCSI wait for scans");
MODULE_AUTHOR("James Bottomley");
MODULE_LICENSE("GPL");
late_initcall(wait_scan_init);
module_exit(wait_scan_exit);

drivers/scsi/sd.c

@ -1051,6 +1051,14 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
&sshdr, SD_TIMEOUT,
SD_MAX_RETRIES);
/*
* If the drive has indicated to us that it
* doesn't have any media in it, don't bother
* with any more polling.
*/
if (media_not_present(sdkp, &sshdr))
return;
if (the_result)
sense_valid = scsi_sense_valid(&sshdr);
retries++;
@ -1059,14 +1067,6 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
((driver_byte(the_result) & DRIVER_SENSE) &&
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
/*
* If the drive has indicated to us that it doesn't have
* any media in it, don't bother with any of the rest of
* this crap.
*/
if (media_not_present(sdkp, &sshdr))
return;
if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
/* no sense, TUR either succeeded or failed
* with a status error */
@ -1467,7 +1467,6 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
if (scsi_status_is_good(res)) {
int ct = 0;
int offset = data.header_length + data.block_descriptor_length;
if (offset >= SD_BUF_SIZE - 2) {
@ -1496,11 +1495,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
sdkp->DPOFUA = 0;
}
ct = sdkp->RCD + 2*sdkp->WCE;
printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n",
diskname, sd_cache_types[ct],
sdkp->DPOFUA ? " w/ FUA" : "");
printk(KERN_NOTICE "SCSI device %s: "
"write cache: %s, read cache: %s, %s\n",
diskname,
sdkp->WCE ? "enabled" : "disabled",
sdkp->RCD ? "disabled" : "enabled",
sdkp->DPOFUA ? "supports DPO and FUA"
: "doesn't support DPO or FUA");
return;
}

drivers/scsi/st.c

@ -9,7 +9,7 @@
Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
Michael Schaefer, J"org Weule, and Eric Youngdale.
Copyright 1992 - 2005 Kai Makisara
Copyright 1992 - 2006 Kai Makisara
email Kai.Makisara@kolumbus.fi
Some small formal changes - aeb, 950809
@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
static const char *verstr = "20050830";
static const char *verstr = "20061107";
#include <linux/module.h>
@ -999,7 +999,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
STp->min_block = ((STp->buffer)->b_data[4] << 8) |
(STp->buffer)->b_data[5];
if ( DEB( debugging || ) !STp->inited)
printk(KERN_WARNING
printk(KERN_INFO
"%s: Block limits %d - %d bytes.\n", name,
STp->min_block, STp->max_block);
} else {
@ -1224,7 +1224,7 @@ static int st_flush(struct file *filp, fl_owner_t id)
}
DEBC( if (STp->nbr_requests)
printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
if (STps->rw == ST_WRITING && !STp->pos_unknown) {
@ -4056,9 +4056,9 @@ static int st_probe(struct device *dev)
goto out_free_tape;
}
sdev_printk(KERN_WARNING, SDp,
sdev_printk(KERN_NOTICE, SDp,
"Attached scsi tape %s\n", tape_name(tpnt));
printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
queue_dma_alignment(SDp->request_queue) + 1);

drivers/scsi/stex.c

@ -11,8 +11,6 @@
* Written By:
* Ed Lin <promise_linux@promise.com>
*
* Version: 3.0.0.1
*
*/
#include <linux/init.h>
@ -37,9 +35,9 @@
#include <scsi/scsi_tcq.h>
#define DRV_NAME "stex"
#define ST_DRIVER_VERSION "3.0.0.1"
#define ST_DRIVER_VERSION "3.1.0.1"
#define ST_VER_MAJOR 3
#define ST_VER_MINOR 0
#define ST_VER_MINOR 1
#define ST_OEM 0
#define ST_BUILD_VER 1
@ -76,8 +74,10 @@ enum {
MU_STATE_STARTED = 4,
MU_STATE_RESETTING = 5,
MU_MAX_DELAY_TIME = 240000,
MU_MAX_DELAY = 120,
MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
MU_HARD_RESET_WAIT = 30000,
HMU_PARTNER_TYPE = 2,
/* firmware returned values */
@ -120,7 +120,8 @@ enum {
st_shasta = 0,
st_vsc = 1,
st_yosemite = 2,
st_vsc1 = 2,
st_yosemite = 3,
PASSTHRU_REQ_TYPE = 0x00000001,
PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
@ -150,6 +151,8 @@ enum {
MGT_CMD_SIGNATURE = 0xba,
INQUIRY_EVPD = 0x01,
ST_ADDITIONAL_MEM = 0x200000,
};
/* SCSI inquiry data */
@ -211,7 +214,9 @@ struct handshake_frame {
__le32 partner_ver_minor;
__le32 partner_ver_oem;
__le32 partner_ver_build;
u32 reserved1[4];
__le32 extra_offset; /* NEW */
__le32 extra_size; /* NEW */
u32 reserved1[2];
};
struct req_msg {
@ -302,6 +307,7 @@ struct st_hba {
void __iomem *mmio_base; /* iomapped PCI memory space */
void *dma_mem;
dma_addr_t dma_handle;
size_t dma_size;
struct Scsi_Host *host;
struct pci_dev *pdev;
@ -507,6 +513,7 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
size_t count = sizeof(struct st_frame);
p = hba->copy_buffer;
stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD);
memset(p->base, 0, sizeof(u32)*6);
*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
p->rom_addr = 0;
@ -901,27 +908,34 @@ static int stex_handshake(struct st_hba *hba)
void __iomem *base = hba->mmio_base;
struct handshake_frame *h;
dma_addr_t status_phys;
int i;
u32 data;
unsigned long before;
if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
readl(base + IDBL);
		before = jiffies;
		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));
				return -1;
			}
			rmb();
			msleep(1);
		}
	}
udelay(10);
data = readl(base + OMR1);
if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
data &= 0x0000ffff;
if (hba->host->can_queue > data)
hba->host->can_queue = data;
}
h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
h->rb_phy = cpu_to_le32(hba->dma_handle);
h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
@ -931,6 +945,11 @@ static int stex_handshake(struct st_hba *hba)
h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
stex_gettime(&h->hosttime);
h->partner_type = HMU_PARTNER_TYPE;
if (hba->dma_size > STEX_BUFFER_SIZE) {
h->extra_offset = cpu_to_le32(STEX_BUFFER_SIZE);
h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
} else
h->extra_offset = h->extra_size = 0;
status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
writel(status_phys, base + IMR0);
@ -944,18 +963,17 @@ static int stex_handshake(struct st_hba *hba)
readl(base + IDBL); /* flush */
udelay(10);
	before = jiffies;
	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			return -1;
		}
		rmb();
		msleep(1);
	}
writel(0, base + IMR0);
readl(base + IMR0);
@ -1038,9 +1056,9 @@ static void stex_hard_reset(struct st_hba *hba)
pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
for (i = 0; i < MU_MAX_DELAY_TIME; i++) {
for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_MASTER)
if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
break;
msleep(1);
}
@ -1100,18 +1118,18 @@ static int stex_reset(struct scsi_cmnd *cmd)
static int stex_biosparam(struct scsi_device *sdev,
struct block_device *bdev, sector_t capacity, int geom[])
{
int heads = 255, sectors = 63, cylinders;
int heads = 255, sectors = 63;
if (capacity < 0x200000) {
heads = 64;
sectors = 32;
}
cylinders = sector_div(capacity, heads * sectors);
sector_div(capacity, heads * sectors);
geom[0] = heads;
geom[1] = sectors;
geom[2] = cylinders;
geom[2] = capacity;
return 0;
}
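
/*
 * Worked example for the sector_div() usage above (a sketch): sector_div(x, n)
 * divides in place -- x becomes the quotient and the *remainder* is
 * returned.  For a 16 GiB array, capacity = 33554432 sectors >= 0x200000,
 * so heads = 255, sectors = 63, and after sector_div() capacity holds
 * 33554432 / (255 * 63) = 2088, the cylinder count reported in geom[2].
 * The old code assigned the returned remainder to geom[2] instead.
 */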
@ -1193,8 +1211,13 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_iounmap;
}
hba->cardtype = (unsigned int) id->driver_data;
if (hba->cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1)
hba->cardtype = st_vsc1;
hba->dma_size = (hba->cardtype == st_vsc1) ?
(STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE);
hba->dma_mem = dma_alloc_coherent(&pdev->dev,
STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL);
hba->dma_size, &hba->dma_handle, GFP_KERNEL);
if (!hba->dma_mem) {
err = -ENOMEM;
printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
@ -1207,8 +1230,6 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
hba->mu_status = MU_STATE_STARTING;
hba->cardtype = (unsigned int) id->driver_data;
	/* firmware uses id/lun pair for a logical drive, but lun would always
	   be 0 if CONFIG_SCSI_MULTI_LUN is not configured, so we use
channel to map lun here */
@ -1233,7 +1254,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto out_free_irq;
err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE);
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
pci_name(pdev));
@ -1256,7 +1277,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_free_irq:
free_irq(pdev->irq, hba);
out_pci_free:
dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE,
dma_free_coherent(&pdev->dev, hba->dma_size,
hba->dma_mem, hba->dma_handle);
out_iounmap:
iounmap(hba->mmio_base);
@ -1317,7 +1338,7 @@ static void stex_hba_free(struct st_hba *hba)
pci_release_regions(hba->pdev);
dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE,
dma_free_coherent(&hba->pdev->dev, hba->dma_size,
hba->dma_mem, hba->dma_handle);
}
@ -1346,15 +1367,32 @@ static void stex_shutdown(struct pci_dev *pdev)
}
static struct pci_device_id stex_pci_tbl[] = {
{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
{ 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
{ 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
{ 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
{ 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
{ 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite },
/* st_shasta */
{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX12350 */
{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX4350 */
{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX24350 */
/* st_vsc */
{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
/* st_yosemite */
{ 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0,
st_yosemite }, /* SuperTrak EX4650 */
{ 0x105a, 0x8650, PCI_ANY_ID, 0x4610, 0, 0,
st_yosemite }, /* SuperTrak EX4650o */
{ 0x105a, 0x8650, PCI_ANY_ID, 0x8600, 0, 0,
st_yosemite }, /* SuperTrak EX8650EL */
{ 0x105a, 0x8650, PCI_ANY_ID, 0x8601, 0, 0,
st_yosemite }, /* SuperTrak EX8650 */
{ 0x105a, 0x8650, PCI_ANY_ID, 0x8602, 0, 0,
st_yosemite }, /* SuperTrak EX8654 */
{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_yosemite }, /* generic st_yosemite */
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);

drivers/scsi/t128.h

@ -92,12 +92,13 @@
#define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */
#ifndef ASM
static int t128_abort(Scsi_Cmnd *);
static int t128_abort(struct scsi_cmnd *);
static int t128_biosparam(struct scsi_device *, struct block_device *,
sector_t, int*);
static int t128_detect(struct scsi_host_template *);
static int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
static int t128_bus_reset(Scsi_Cmnd *);
static int t128_queue_command(struct scsi_cmnd *,
void (*done)(struct scsi_cmnd *));
static int t128_bus_reset(struct scsi_cmnd *);
#ifndef CMD_PER_LUN
#define CMD_PER_LUN 2

include/scsi/libsas.h

@ -339,6 +339,8 @@ struct sas_ha_struct {
void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
void *lldd_ha; /* not touched by sas class code */
struct list_head eh_done_q;
};
#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@ -527,6 +529,8 @@ struct sas_task {
void *lldd_task; /* for use by LLDDs */
void *uldd_task;
struct work_struct abort_work;
};
@ -534,6 +538,7 @@ struct sas_task {
#define SAS_TASK_STATE_PENDING 1
#define SAS_TASK_STATE_DONE 2
#define SAS_TASK_STATE_ABORTED 4
#define SAS_TASK_INITIATOR_ABORTED 8
static inline struct sas_task *sas_alloc_task(gfp_t flags)
{
@ -593,6 +598,7 @@ struct sas_domain_function_template {
extern int sas_register_ha(struct sas_ha_struct *);
extern int sas_unregister_ha(struct sas_ha_struct *);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
extern int sas_queuecommand(struct scsi_cmnd *,
void (*scsi_done)(struct scsi_cmnd *));
extern int sas_target_alloc(struct scsi_target *);
@ -625,4 +631,6 @@ void sas_unregister_dev(struct domain_device *);
void sas_init_dev(struct domain_device *);
void sas_task_abort(struct sas_task *task);
#endif /* _SASLIB_H_ */

include/scsi/libsrp.h (new file, 77 lines)

@ -0,0 +1,77 @@
#ifndef __LIBSRP_H__
#define __LIBSRP_H__
#include <linux/list.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/srp.h>
enum iue_flags {
V_DIOVER,
V_WRITE,
V_LINKED,
V_FLYING,
};
struct srp_buf {
dma_addr_t dma;
void *buf;
};
struct srp_queue {
void *pool;
void *items;
struct kfifo *queue;
spinlock_t lock;
};
struct srp_target {
struct Scsi_Host *shost;
struct device *dev;
spinlock_t lock;
struct list_head cmd_queue;
size_t srp_iu_size;
struct srp_queue iu_queue;
size_t rx_ring_size;
struct srp_buf **rx_ring;
void *ldata;
};
struct iu_entry {
struct srp_target *target;
struct list_head ilist;
dma_addr_t remote_token;
unsigned long flags;
struct srp_buf *sbuf;
};
typedef int (srp_rdma_t)(struct scsi_cmnd *, struct scatterlist *, int,
struct srp_direct_buf *, int,
enum dma_data_direction, unsigned int);
extern int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
extern void srp_target_free(struct srp_target *);
extern struct iu_entry *srp_iu_get(struct srp_target *);
extern void srp_iu_put(struct iu_entry *);
extern int srp_cmd_queue(struct Scsi_Host *, struct srp_cmd *, void *, u64);
extern int srp_transfer_data(struct scsi_cmnd *, struct srp_cmd *,
srp_rdma_t, int, int);
static inline struct srp_target *host_to_srp_target(struct Scsi_Host *host)
{
return (struct srp_target *) host->hostdata;
}
static inline int srp_cmd_direction(struct srp_cmd *cmd)
{
return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
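
/*
 * Example (assuming the standard SRP buf_fmt encoding): a WRITE carries a
 * data-out descriptor format in the upper nibble of buf_fmt, so
 * srp_cmd_direction() returns DMA_TO_DEVICE; for a READ the upper nibble
 * is clear and the result is DMA_FROM_DEVICE.
 */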
#endif

include/scsi/scsi_cmnd.h

@ -8,6 +8,7 @@
struct request;
struct scatterlist;
struct Scsi_Host;
struct scsi_device;
@ -72,6 +73,9 @@ struct scsi_cmnd {
unsigned short use_sg; /* Number of pieces of scatter-gather */
unsigned short sglist_len; /* size of malloc'd scatter-gather list */
/* offset in cmd we are at (for multi-transfer tgt cmds) */
unsigned offset;
unsigned underflow; /* Return error if less than
this amount is transferred */
@ -119,7 +123,10 @@ struct scsi_cmnd {
};
extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
extern void scsi_put_command(struct scsi_cmnd *);
extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
struct device *);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
@ -128,4 +135,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
extern void scsi_free_sgtable(struct scatterlist *, int);
#endif /* _SCSI_SCSI_CMND_H */

include/scsi/scsi_device.h

@ -224,12 +224,12 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
/**
* shost_for_each_device - iterate over all devices of a host
* @sdev: iterator
* @host: host whiches devices we want to iterate over
* @sdev: the &struct scsi_device to use as a cursor
* @shost: the &struct scsi_host to iterate over
*
* This traverses over each devices of @shost. The devices have
* a reference that must be released by scsi_host_put when breaking
* out of the loop.
* Iterator that returns each device attached to @shost. This loop
* takes a reference on each device and releases it at the end. If
* you break out of the loop, you must call scsi_device_put(sdev).
*/
#define shost_for_each_device(sdev, shost) \
for ((sdev) = __scsi_iterate_devices((shost), NULL); \
@ -238,16 +238,16 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
/**
* __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
* @sdev: iterator
* @host: host whiches devices we want to iterate over
* @sdev: the &struct scsi_device to use as a cursor
* @shost: the &struct scsi_host to iterate over
*
* This traverses over each devices of @shost. It does _not_ take a
* reference on the scsi_device, thus it the whole loop must be protected
* by shost->host_lock.
* Iterator that returns each device attached to @shost. It does _not_
* take a reference on the scsi_device, so the whole loop must be
* protected by shost->host_lock.
*
* Note: The only reason why drivers would want to use this is because
* they're need to access the device list in irq context. Otherwise you
* really want to use shost_for_each_device instead.
* Note: The only reason to use this is because you need to access the
* device list in interrupt context. Otherwise you really want to use
* shost_for_each_device instead.
*/
#define __shost_for_each_device(sdev, shost) \
list_for_each_entry((sdev), &((shost)->__devices), siblings)
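
/*
 * Usage sketch (hypothetical caller), showing the reference rule above:
 *
 *	struct scsi_device *sdev;
 *
 *	shost_for_each_device(sdev, shost) {
 *		if (sdev->lun == 0) {
 *			do_something(sdev);
 *			scsi_device_put(sdev);	// drop ref when breaking out
 *			break;
 *		}
 *	}
 */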

View File

@ -7,6 +7,7 @@
#include <linux/workqueue.h>
#include <linux/mutex.h>
struct request_queue;
struct block_device;
struct completion;
struct module;
@ -123,6 +124,39 @@ struct scsi_host_template {
int (* queuecommand)(struct scsi_cmnd *,
void (*done)(struct scsi_cmnd *));
/*
* The transfer functions are used to queue a scsi command to
* the LLD. When the driver has finished processing the command,
* the done callback is invoked.
*
* Return values: see queuecommand
*
* If the LLD accepts the cmd, it should set cmd->result to an
* appropriate value on completion, before calling the done function.
*
* STATUS: REQUIRED FOR TARGET DRIVERS
*/
/* TODO: rename */
int (* transfer_response)(struct scsi_cmnd *,
void (*done)(struct scsi_cmnd *));
/*
* This is called to instruct the LLD to transfer cmd->request_bufflen
* bytes of data for the cmd, starting at cmd->offset. cmd->use_sg
* specifies the number of scatterlist entries in the command,
* and cmd->request_buffer contains the scatterlist.
*
* If the command cannot be processed in one transfer_data call
* because a scatterlist within the LLD's limits cannot be
* created, then transfer_data will be called multiple times.
* It is initially called from process context; later calls are
* made from interrupt context.
*/
int (* transfer_data)(struct scsi_cmnd *,
void (*done)(struct scsi_cmnd *));
/* Used as the callback for the completion of a task management request. */
int (* tsk_mgmt_response)(u64 mid, int result);
/*
* This is an error handling strategy routine. You don't need to
* define one of these if you don't want to - there is a default
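The three target-mode hooks added in this hunk fit together as follows; a hedged sketch of an LLD wiring them up, with every my_* helper hypothetical:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int my_transfer_response(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	my_send_status(cmd);		/* push status back to the initiator */
	done(cmd);
	return 0;
}

static int my_transfer_data(struct scsi_cmnd *cmd,
			    void (*done)(struct scsi_cmnd *))
{
	/* Move cmd->request_bufflen bytes starting at cmd->offset,
	 * using the cmd->use_sg entries in cmd->request_buffer. */
	my_start_dma(cmd, done);	/* done() runs when the DMA completes */
	return 0;
}

static int my_tsk_mgmt_response(u64 mid, int result)
{
	return 0;			/* nothing to clean up in this sketch */
}

static struct scsi_host_template my_tgt_template = {
	.name			= "my_tgt",
	.transfer_response	= my_transfer_response,
	.transfer_data		= my_transfer_data,
	.tsk_mgmt_response	= my_tsk_mgmt_response,
};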
@ -240,6 +274,24 @@ struct scsi_host_template {
*/
void (* target_destroy)(struct scsi_target *);
/*
* If a host has the ability to discover targets on its own instead
* of scanning the entire bus, it can fill in this function and
* call scsi_scan_host(). This function will then be called
* periodically, with the scsi_host and the elapsed scan time in
* jiffies as arguments, until it returns 1 to indicate that the
* scan has finished.
*
* Status: OPTIONAL
*/
int (* scan_finished)(struct Scsi_Host *, unsigned long);
/*
* If the host wants to be called before the scan starts, but
* after the midlayer has finished setting up for the scan, it can
* fill in this function.
*/
void (* scan_start)(struct Scsi_Host *);
/*
* fill in this function to allow the queue depth of this host
* to be changeable (on a per device basis). returns either
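Taken together, scan_start() and scan_finished() implement the asynchronous-scan handshake described above: the driver kicks off discovery without blocking, and the midlayer polls until the driver reports completion or gives up. A minimal sketch, assuming a hypothetical my_hba structure kept in hostdata:

#include <linux/jiffies.h>
#include <scsi/scsi_host.h>

struct my_hba {
	atomic_t scan_done;		/* set by the HBA's event handler */
};

static void my_scan_start(struct Scsi_Host *shost)
{
	struct my_hba *hba = (struct my_hba *)shost->hostdata;

	my_begin_discovery(hba);	/* hypothetical: fire and forget */
}

static int my_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
	struct my_hba *hba = (struct my_hba *)shost->hostdata;

	if (elapsed > 10 * HZ)		/* driver-chosen timeout */
		return 1;
	return atomic_read(&hba->scan_done);
}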
@ -552,6 +604,9 @@ struct Scsi_Host {
/* task mgmt function in progress */
unsigned tmf_in_progress:1;
/* Asynchronous scan in progress */
unsigned async_scan:1;
/*
* Optional work queue to be utilized by the transport
*/
@ -568,6 +623,12 @@ struct Scsi_Host {
*/
unsigned int max_host_blocked;
/*
* Queue used for scsi_tgt messages, async events, or any other
* requests that need to be processed in user space
*/
struct request_queue *uspace_req_q;
/* legacy crap */
unsigned long base;
unsigned long io_port;
@ -648,11 +709,6 @@ extern const char *scsi_host_state_name(enum scsi_host_state);
extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
{
shost->host_lock = lock;
}
static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
return shost->shost_gendev.parent;
@ -671,6 +727,9 @@ extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
struct class_container;
extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
void (*) (struct request_queue *));
/*
* These two functions are used to allocate and free a pseudo device
* which will connect to the host adapter itself rather than any

include/scsi/scsi_tgt.h Normal file
View File

@ -0,0 +1,19 @@
/*
* SCSI target definitions
*/
#include <linux/dma-mapping.h>
struct Scsi_Host;
struct scsi_cmnd;
struct scsi_lun;
extern struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *);
extern int scsi_tgt_alloc_queue(struct Scsi_Host *);
extern void scsi_tgt_free_queue(struct Scsi_Host *);
extern int scsi_tgt_queue_command(struct scsi_cmnd *, struct scsi_lun *, u64);
extern int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *, int, u64, struct scsi_lun *,
void *);
extern struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *,
enum dma_data_direction, gfp_t);
extern void scsi_host_put_command(struct Scsi_Host *, struct scsi_cmnd *);
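A hedged sketch of the receive path these declarations suggest for a target LLD: allocate a midlayer command for a CDB arriving from the fabric and hand it to the tgt core, which forwards it to the user-space target daemon. The my_recv_cmd entry point, the chosen DMA direction, and the 16-byte CDB copy are assumptions for illustration:

#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tgt.h>

static int my_recv_cmd(struct Scsi_Host *shost, u8 *cdb,
		       struct scsi_lun *lun, u64 tag)
{
	struct scsi_cmnd *cmd;

	/* DMA_FROM_DEVICE as an example of a read-type command */
	cmd = scsi_host_get_command(shost, DMA_FROM_DEVICE, GFP_ATOMIC);
	if (!cmd)
		return -ENOMEM;
	memcpy(cmd->cmnd, cdb, 16);	/* MAX_COMMAND_SIZE at this point */
	return scsi_tgt_queue_command(cmd, lun, tag);
}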

View File

@ -0,0 +1,90 @@
/*
* SCSI target kernel/user interface
*
* Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
* Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#ifndef __SCSI_TARGET_IF_H
#define __SCSI_TARGET_IF_H
/* user -> kernel */
#define TGT_UEVENT_CMD_RSP 0x0001
#define TGT_UEVENT_TSK_MGMT_RSP 0x0002
/* kernel -> user */
#define TGT_KEVENT_CMD_REQ 0x1001
#define TGT_KEVENT_CMD_DONE 0x1002
#define TGT_KEVENT_TSK_MGMT_REQ 0x1003
struct tgt_event_hdr {
uint16_t version;
uint16_t status;
uint16_t type;
uint16_t len;
} __attribute__ ((aligned (sizeof(uint64_t))));
struct tgt_event {
struct tgt_event_hdr hdr;
union {
/* user -> kernel */
struct {
int host_no;
uint32_t len;
int result;
aligned_u64 uaddr;
uint8_t rw;
aligned_u64 tag;
} cmd_rsp;
struct {
int host_no;
aligned_u64 mid;
int result;
} tsk_mgmt_rsp;
/* kernel -> user */
struct {
int host_no;
uint32_t data_len;
uint8_t scb[16];
uint8_t lun[8];
int attribute;
aligned_u64 tag;
} cmd_req;
struct {
int host_no;
aligned_u64 tag;
int result;
} cmd_done;
struct {
int host_no;
int function;
aligned_u64 tag;
uint8_t lun[8];
aligned_u64 mid;
} tsk_mgmt_req;
} p;
} __attribute__ ((aligned (sizeof(uint64_t))));
#define TGT_RING_SIZE (1UL << 16)
#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
#endif
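With 4 KiB pages, the macros above describe a 64 KiB ring of 16 pages; and because sizeof(struct tgt_event) need not divide PAGE_SIZE evenly, events are indexed page by page rather than as one flat array. A sketch of how a consumer of the mmap()'ed ring might locate event idx under that assumption (PAGE_SIZE must match the kernel's page size):

static struct tgt_event *tgt_ring_event(char *ring, unsigned int idx)
{
	unsigned int pg  = idx / TGT_EVENT_PER_PAGE;	/* page within ring */
	unsigned int off = idx % TGT_EVENT_PER_PAGE;	/* event within page */

	return (struct tgt_event *)(ring + pg * PAGE_SIZE) + off;
}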

View File

@ -73,6 +73,8 @@ struct sas_phy {
/* for the list of phys belonging to a port */
struct list_head port_siblings;
struct work_struct reset_work;
};
#define dev_to_phy(d) \