commit 7d6322b465
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6

@@ -60,6 +60,7 @@
Remove un-needed eh_abort handler.
Add support for embedded firmware error strings.
2.26.02.003 - Correctly handle single sgl's with use_sg=1.
2.26.02.004 - Add support for 9550SX controllers.
*/

#include <linux/module.h>

@@ -82,7 +83,7 @@
#include "3w-9xxx.h"

/* Globals */
#define TW_DRIVER_VERSION "2.26.02.003"
#define TW_DRIVER_VERSION "2.26.02.004"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;

@@ -892,11 +893,6 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
}

if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing");
writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
}

if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
if (tw_dev->reset_print == 0) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");

@@ -930,6 +926,36 @@ static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
return retval;
} /* End twa_empty_response_queue() */

/* This function will clear the pchip/response queue on 9550SX */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
u32 status_reg_value, response_que_value;
int count = 0, retval = 1;

if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) {
status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) {
/* P-chip settle time */
msleep(500);
retval = 0;
goto out;
}
status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
count++;
}
if (count == TW_MAX_RESPONSE_DRAIN)
goto out;

retval = 0;
} else
retval = 0;
out:
return retval;
} /* End twa_empty_response_queue_large() */

/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{

@@ -1613,8 +1639,16 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

while (tries < TW_MAX_RESET_TRIES) {
if (do_soft_reset)
if (do_soft_reset) {
TW_SOFT_RESET(tw_dev);
/* Clear pchip/response queue on 9550SX */
if (twa_empty_response_queue_large(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
do_soft_reset = 1;
tries++;
continue;
}
}

/* Make sure controller is in a good state */
if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {

@@ -2034,7 +2068,10 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
goto out_free_device_extension;
}

mem_addr = pci_resource_start(pdev, 1);
if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
mem_addr = pci_resource_start(pdev, 1);
else
mem_addr = pci_resource_start(pdev, 2);

/* Save base address */
tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);

@@ -2148,6 +2185,8 @@ static void twa_remove(struct pci_dev *pdev)
static struct pci_device_id twa_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);

@@ -267,7 +267,6 @@ static twa_message_type twa_error_table[] = {
#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008

/* Status register bit definitions */
#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000

@@ -285,9 +284,8 @@ static twa_message_type twa_error_table[] = {
#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
#define TW_STATUS_EXPECTED_BITS 0x00002000
#define TW_STATUS_UNEXPECTED_BITS 0x00F00008
#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
#define TW_STATUS_VALID_INTERRUPT 0x00DF0000

/* RESPONSE QUEUE BIT DEFINITIONS */
#define TW_RESPONSE_ID_MASK 0x00000FF0

@@ -324,9 +322,9 @@ static twa_message_type twa_error_table[] = {

/* Compatibility defines */
#define TW_9000_ARCH_ID 0x5
#define TW_CURRENT_DRIVER_SRL 28
#define TW_CURRENT_DRIVER_BUILD 9
#define TW_CURRENT_DRIVER_BRANCH 4
#define TW_CURRENT_DRIVER_SRL 30
#define TW_CURRENT_DRIVER_BUILD 80
#define TW_CURRENT_DRIVER_BRANCH 0

/* Phase defines */
#define TW_PHASE_INITIAL 0

@@ -334,6 +332,7 @@ static twa_message_type twa_error_table[] = {
#define TW_PHASE_SGLIST 2

/* Misc defines */
#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
#define TW_SECTOR_SIZE 512
#define TW_ALIGNMENT_9000 4 /* 4 bytes */
#define TW_ALIGNMENT_9000_SGL 0x3

@@ -417,6 +416,9 @@ static twa_message_type twa_error_table[] = {
#ifndef PCI_DEVICE_ID_3WARE_9000
#define PCI_DEVICE_ID_3WARE_9000 0x1002
#endif
#ifndef PCI_DEVICE_ID_3WARE_9550SX
#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
#endif

/* Bitmask macros to eliminate bitfields */

@@ -443,6 +445,7 @@ static twa_message_type twa_error_table[] = {
#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))

@@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp.o
obj-$(CONFIG_SCSI_GDTH) += gdth.o

@@ -313,18 +313,37 @@ int aac_get_containers(struct aac_dev *dev)
}
dresp = (struct aac_mount *)fib_data(fibptr);

if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
dinfo->command = cpu_to_le32(VM_NameServe64);
dinfo->count = cpu_to_le32(index);
dinfo->type = cpu_to_le32(FT_FILESYS);

if (fib_send(ContainerCommand,
fibptr,
sizeof(struct aac_query_mount),
FsaNormal,
1, 1,
NULL, NULL) < 0)
continue;
} else
dresp->mnt[0].capacityhigh = 0;

dprintk ((KERN_DEBUG
"VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
"VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
(int)index, (int)le32_to_cpu(dresp->status),
(int)le32_to_cpu(dresp->mnt[0].vol),
(int)le32_to_cpu(dresp->mnt[0].state),
(unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
(((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr[index].valid = 1;
fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
fsa_dev_ptr[index].size
= ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
(((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr[index].ro = 1;
}

@@ -460,7 +479,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
* is updated in the struct fsa_dev_info structure rather than returned.
*/

static int probe_container(struct aac_dev *dev, int cid)
int probe_container(struct aac_dev *dev, int cid)
{
struct fsa_dev_info *fsa_dev_ptr;
int status;

@@ -496,12 +515,30 @@ static int probe_container(struct aac_dev *dev, int cid)

dresp = (struct aac_mount *) fib_data(fibptr);

if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
dinfo->command = cpu_to_le32(VM_NameServe64);
dinfo->count = cpu_to_le32(cid);
dinfo->type = cpu_to_le32(FT_FILESYS);

if (fib_send(ContainerCommand,
fibptr,
sizeof(struct aac_query_mount),
FsaNormal,
1, 1,
NULL, NULL) < 0)
goto error;
} else
dresp->mnt[0].capacityhigh = 0;

if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
fsa_dev_ptr[cid].valid = 1;
fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
fsa_dev_ptr[cid].size
= ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
(((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
fsa_dev_ptr[cid].ro = 1;
}

@@ -655,7 +692,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
fibptr,
sizeof(*info),
FsaNormal,
1, 1,
-1, 1, /* First `interrupt' command uses special wait */
NULL,
NULL);

@@ -806,8 +843,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (!(dev->raw_io_interface)) {
dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
sizeof(struct aac_fibhdr) -
sizeof(struct aac_write) + sizeof(struct sgmap)) /
sizeof(struct sgmap);
sizeof(struct aac_write) + sizeof(struct sgentry)) /
sizeof(struct sgentry);
if (dev->dac_support) {
/*
* 38 scatter gather elements

@@ -816,8 +853,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
(dev->max_fib_size -
sizeof(struct aac_fibhdr) -
sizeof(struct aac_write64) +
sizeof(struct sgmap64)) /
sizeof(struct sgmap64);
sizeof(struct sgentry64)) /
sizeof(struct sgentry64);
}
dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {

@@ -854,7 +891,40 @@ static void io_callback(void *context, struct fib * fibptr)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);

dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));
if (nblank(dprintk(x))) {
u64 lba;
switch (scsicmd->cmnd[0]) {
case WRITE_6:
case READ_6:
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
break;
case WRITE_16:
case READ_16:
lba = ((u64)scsicmd->cmnd[2] << 56) |
((u64)scsicmd->cmnd[3] << 48) |
((u64)scsicmd->cmnd[4] << 40) |
((u64)scsicmd->cmnd[5] << 32) |
((u64)scsicmd->cmnd[6] << 24) |
(scsicmd->cmnd[7] << 16) |
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
break;
case WRITE_12:
case READ_12:
lba = ((u64)scsicmd->cmnd[2] << 24) |
(scsicmd->cmnd[3] << 16) |
(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
break;
default:
lba = ((u64)scsicmd->cmnd[2] << 24) |
(scsicmd->cmnd[3] << 16) |
(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
break;
}
printk(KERN_DEBUG
"io_callback[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies);
}

if (fibptr == NULL)
BUG();

@@ -895,7 +965,7 @@ static void io_callback(void *context, struct fib * fibptr)

static int aac_read(struct scsi_cmnd * scsicmd, int cid)
{
u32 lba;
u64 lba;
u32 count;
int status;

@@ -907,23 +977,69 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
/*
* Get block address and transfer length
*/
if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
{
switch (scsicmd->cmnd[0]) {
case READ_6:
dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));

lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
count = scsicmd->cmnd[4];

if (count == 0)
count = 256;
} else {
break;
case READ_16:
dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));

lba = ((u64)scsicmd->cmnd[2] << 56) |
((u64)scsicmd->cmnd[3] << 48) |
((u64)scsicmd->cmnd[4] << 40) |
((u64)scsicmd->cmnd[5] << 32) |
((u64)scsicmd->cmnd[6] << 24) |
(scsicmd->cmnd[7] << 16) |
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
count = (scsicmd->cmnd[10] << 24) |
(scsicmd->cmnd[11] << 16) |
(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
break;
case READ_12:
dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));

lba = ((u64)scsicmd->cmnd[2] << 24) |
(scsicmd->cmnd[3] << 16) |
(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[6] << 24) |
(scsicmd->cmnd[7] << 16) |
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
break;
default:
dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));

lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
lba = ((u64)scsicmd->cmnd[2] << 24) |
(scsicmd->cmnd[3] << 16) |
(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
break;
}
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n",
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
(lba & 0xffffffff00000000LL)) {
dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
? sizeof(scsicmd->sense_buffer)
: sizeof(dev->fsa_dev[cid].sense_data));
scsicmd->scsi_done(scsicmd);
return 0;
}
/*
* Alocate and initialize a Fib
*/

@@ -936,8 +1052,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
if (dev->raw_io_interface) {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
readcmd->block[0] = cpu_to_le32(lba);
readcmd->block[1] = 0;
readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(cid);
readcmd->flags = cpu_to_le16(1);

@@ -964,7 +1080,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
readcmd->command = cpu_to_le32(VM_CtHostRead64);
readcmd->cid = cpu_to_le16(cid);
readcmd->sector_count = cpu_to_le16(count);
readcmd->block = cpu_to_le32(lba);
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->pad = 0;
readcmd->flags = 0;

@@ -989,7 +1105,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(cid);
readcmd->block = cpu_to_le32(lba);
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);

aac_build_sg(scsicmd, &readcmd->sg);

@@ -1031,7 +1147,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)

static int aac_write(struct scsi_cmnd * scsicmd, int cid)
{
u32 lba;
u64 lba;
u32 count;
int status;
u16 fibsize;

@@ -1048,13 +1164,48 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));

lba = ((u64)scsicmd->cmnd[2] << 56) |
((u64)scsicmd->cmnd[3] << 48) |
((u64)scsicmd->cmnd[4] << 40) |
((u64)scsicmd->cmnd[5] << 32) |
((u64)scsicmd->cmnd[6] << 24) |
(scsicmd->cmnd[7] << 16) |
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
} else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));

lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
| (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
| (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
} else {
dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
&& (lba & 0xffffffff00000000LL)) {
dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
? sizeof(scsicmd->sense_buffer)
: sizeof(dev->fsa_dev[cid].sense_data));
scsicmd->scsi_done(scsicmd);
return 0;
}
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/

@@ -1068,8 +1219,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
if (dev->raw_io_interface) {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
writecmd->block[0] = cpu_to_le32(lba);
writecmd->block[1] = 0;
writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(cid);
writecmd->flags = 0;

@@ -1096,7 +1247,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
writecmd->cid = cpu_to_le16(cid);
writecmd->sector_count = cpu_to_le16(count);
writecmd->block = cpu_to_le32(lba);
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->pad = 0;
writecmd->flags = 0;

@@ -1121,7 +1272,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(cid);
writecmd->block = cpu_to_le32(lba);
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->count = cpu_to_le32(count * 512);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */

@@ -1310,11 +1461,18 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/
if ((fsa_dev_ptr[cid].valid & 1) == 0) {
switch (scsicmd->cmnd[0]) {
case SERVICE_ACTION_IN:
if (!(dev->raw_io_interface) ||
!(dev->raw_io_64) ||
((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
spin_unlock_irq(host->host_lock);
probe_container(dev, cid);
if ((fsa_dev_ptr[cid].valid & 1) == 0)
fsa_dev_ptr[cid].valid = 0;
spin_lock_irq(host->host_lock);
if (fsa_dev_ptr[cid].valid == 0) {
scsicmd->result = DID_NO_CONNECT << 16;

@@ -1375,7 +1533,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
memset(&inq_data, 0, sizeof (struct inquiry_data));

inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data.inqd_len = 31;
/*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */

@@ -1397,13 +1554,55 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
return aac_get_container_name(scsicmd, cid);
}
case SERVICE_ACTION_IN:
if (!(dev->raw_io_interface) ||
!(dev->raw_io_64) ||
((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
break;
{
u64 capacity;
char cp[12];
unsigned int offset = 0;

dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
capacity = fsa_dev_ptr[cid].size - 1;
if (scsicmd->cmnd[13] > 12) {
offset = scsicmd->cmnd[13] - 12;
if (offset > sizeof(cp))
break;
memset(cp, 0, offset);
aac_internal_transfer(scsicmd, cp, 0, offset);
}
cp[0] = (capacity >> 56) & 0xff;
cp[1] = (capacity >> 48) & 0xff;
cp[2] = (capacity >> 40) & 0xff;
cp[3] = (capacity >> 32) & 0xff;
cp[4] = (capacity >> 24) & 0xff;
cp[5] = (capacity >> 16) & 0xff;
cp[6] = (capacity >> 8) & 0xff;
cp[7] = (capacity >> 0) & 0xff;
cp[8] = 0;
cp[9] = 0;
cp[10] = 2;
cp[11] = 0;
aac_internal_transfer(scsicmd, cp, offset, sizeof(cp));

/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;

scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);

return 0;
}

case READ_CAPACITY:
{
u32 capacity;
char cp[8];

dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
if (fsa_dev_ptr[cid].size <= 0x100000000LL)
if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
capacity = fsa_dev_ptr[cid].size - 1;
else
capacity = (u32)-1;

@@ -1417,6 +1616,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[6] = 2;
cp[7] = 0;
aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;

scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);

@@ -1497,6 +1698,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
case READ_6:
case READ_10:
case READ_12:
case READ_16:
/*
* Hack to keep track of ordinal number of the device that
* corresponds to a container. Needed to convert

@@ -1504,17 +1707,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/

spin_unlock_irq(host->host_lock);
if (scsicmd->request->rq_disk)
memcpy(fsa_dev_ptr[cid].devname,
scsicmd->request->rq_disk->disk_name,
8);

if (scsicmd->request->rq_disk)
strlcpy(fsa_dev_ptr[cid].devname,
scsicmd->request->rq_disk->disk_name,
min(sizeof(fsa_dev_ptr[cid].devname),
sizeof(scsicmd->request->rq_disk->disk_name) + 1));
ret = aac_read(scsicmd, cid);
spin_lock_irq(host->host_lock);
return ret;

case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
spin_unlock_irq(host->host_lock);
ret = aac_write(scsicmd, cid);
spin_lock_irq(host->host_lock);

@@ -1745,6 +1950,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case WRITE_10:
case READ_12:
case WRITE_12:
case READ_16:
case WRITE_16:
if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
} else {

@@ -1850,8 +2057,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
sizeof(scsicmd->sense_buffer) :
le32_to_cpu(srbreply->sense_data_size);
#ifdef AAC_DETAILED_STATUS_INFO
dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
le32_to_cpu(srbreply->status), len));
printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
le32_to_cpu(srbreply->status), len);
#endif
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);

@@ -1,6 +1,10 @@
#if (!defined(dprintk))
# define dprintk(x)
#endif
/* eg: if (nblank(dprintk(x))) */
#define _nblank(x) #x
#define nblank(x) _nblank(x)[0]


/*------------------------------------------------------------------------------
* D E F I N E S

@@ -302,7 +306,6 @@ enum aac_queue_types {
*/

#define FsaNormal 1
#define FsaHigh 2

/*
* Define the FIB. The FIB is the where all the requested data and

@@ -546,8 +549,6 @@ struct aac_queue {
/* This is only valid for adapter to host command queues. */
spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
u32 padding; /* Padding - FIXME - can remove I believe */
struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
/* only valid for command queues which receive entries from the adapter. */
struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */

@@ -776,7 +777,9 @@ struct fsa_dev_info {
u64 last;
u64 size;
u32 type;
u32 config_waiting_on;
u16 queue_depth;
u8 config_needed;
u8 valid;
u8 ro;
u8 locked;

@@ -1012,6 +1015,7 @@ struct aac_dev
/* macro side-effects BEWARE */
# define raw_io_interface \
init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
u8 raw_io_64;
u8 printf_enabled;
};

@@ -1362,8 +1366,10 @@ struct aac_srb_reply
#define VM_CtBlockVerify64 18
#define VM_CtHostRead64 19
#define VM_CtHostWrite64 20
#define VM_DrvErrTblLog 21
#define VM_NameServe64 22

#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */
#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */

/*
* Descriptive information (eg, vital stats)

@@ -1472,6 +1478,7 @@ struct aac_mntent {
manager (eg, filesystem) */
__le32 altoid; /* != oid <==> snapshot or
broken mirror exists */
__le32 capacityhigh;
};

#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */

@@ -1707,6 +1714,7 @@ extern struct aac_common aac_config;
#define AifCmdJobProgress 2 /* Progress report */
#define AifJobCtrZero 101 /* Array Zero progress */
#define AifJobStsSuccess 1 /* Job completes */
#define AifJobStsRunning 102 /* Job running */
#define AifCmdAPIReport 3 /* Report from other user of API */
#define AifCmdDriverNotify 4 /* Notify host driver of event */
#define AifDenMorphComplete 200 /* A morph operation completed */

@@ -1777,6 +1785,7 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size);
struct aac_driver_ident* aac_get_driver_ident(int devtype);
int aac_get_adapter_info(struct aac_dev* dev);
int aac_send_shutdown(struct aac_dev *dev);
int probe_container(struct aac_dev *dev, int cid);
extern int numacb;
extern int acbsize;
extern char aac_driver_version[];

@@ -195,7 +195,7 @@ int aac_send_shutdown(struct aac_dev * dev)
fibctx,
sizeof(struct aac_close),
FsaNormal,
1, 1,
-2 /* Timeout silently */, 1,
NULL, NULL);

if (status == 0)

@@ -313,8 +313,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- sizeof(struct aac_fibhdr)
- sizeof(struct aac_write) + sizeof(struct sgmap))
/ sizeof(struct sgmap);
- sizeof(struct aac_write) + sizeof(struct sgentry))
/ sizeof(struct sgentry);
dev->raw_io_64 = 0;
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
(status[0] == 0x00000001)) {
if (status[1] & AAC_OPT_NEW_COMM_64)
dev->raw_io_64 = 1;
}
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, status+4))

@@ -342,8 +349,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = 512;
dev->sg_tablesize = host->sg_tablesize
= (512 - sizeof(struct aac_fibhdr)
- sizeof(struct aac_write) + sizeof(struct sgmap))
/ sizeof(struct sgmap);
- sizeof(struct aac_write) + sizeof(struct sgentry))
/ sizeof(struct sgentry);
host->can_queue = AAC_NUM_IO_FIB;
} else if (acbsize == 2048) {
host->max_sectors = 512;

@@ -39,7 +39,9 @@
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>
#include <asm/delay.h>

#include "aacraid.h"

@@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
/* Interrupt Moderation, only interrupt for first two entries */
if (idx != le32_to_cpu(*(q->headers.consumer))) {
if (--idx == 0) {
if (qid == AdapHighCmdQueue)
idx = ADAP_HIGH_CMD_ENTRIES;
else if (qid == AdapNormCmdQueue)
if (qid == AdapNormCmdQueue)
idx = ADAP_NORM_CMD_ENTRIES;
else if (qid == AdapHighRespQueue)
idx = ADAP_HIGH_RESP_ENTRIES;
else if (qid == AdapNormRespQueue)
else
idx = ADAP_NORM_RESP_ENTRIES;
}
if (idx != le32_to_cpu(*(q->headers.consumer)))
*nonotify = 1;
}

if (qid == AdapHighCmdQueue) {
if (*index >= ADAP_HIGH_CMD_ENTRIES)
*index = 0;
} else if (qid == AdapNormCmdQueue) {
if (qid == AdapNormCmdQueue) {
if (*index >= ADAP_NORM_CMD_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
else if (qid == AdapHighRespQueue)
{
if (*index >= ADAP_HIGH_RESP_ENTRIES)
*index = 0;
}
else if (qid == AdapNormRespQueue)
{
} else {
if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
}
else {
printk("aacraid: invalid qid\n");
BUG();
}

if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",

@@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
{
struct aac_entry * entry = NULL;
int map = 0;
struct aac_queue * q = &dev->queues->queue[qid];

spin_lock_irqsave(q->lock, q->SavedIrql);

if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
{
if (qid == AdapNormCmdQueue) {
/* if no entries wait for some if caller wants to */
while (!aac_get_entry(dev, qid, &entry, index, nonotify))
{

@@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
*/
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
map = 1;
}
else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
{
} else {
while(!aac_get_entry(dev, qid, &entry, index, nonotify))
{
/* if no entries wait for some if caller wants to */

@@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
return 0;
}


/**
* aac_insert_entry - insert a queue entry
* @dev: Adapter
* @index: Index of entry to insert
* @qid: Queue number
* @nonotify: Suppress adapter notification
*
* Gets the next free QE off the requested priorty adapter command
* queue and associates the Fib with the QE. The QE represented by
* index is ready to insert on the queue when this routine returns
* success.
*/

static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
{
struct aac_queue * q = &dev->queues->queue[qid];

if(q == NULL)
BUG();
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, q->SavedIrql);

if (qid == AdapHighCmdQueue ||
qid == AdapNormCmdQueue ||
qid == AdapHighRespQueue ||
qid == AdapNormRespQueue)
{
if (!nonotify)
aac_adapter_notify(dev, qid);
}
else
printk("Suprise insert!\n");
return 0;
}

/*
* Define the highest level of host to adapter communication routines.
* These routines will support host to adapter FS commuication. These

@@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
u32 index;
u32 qid;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_queue * q;
unsigned long flags = 0;
unsigned long qflags;

if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
/*

@@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
* Get a queue entry connect the FIB to it and send an notify
* the adapter a command is ready.
*/
if (priority == FsaHigh) {
hw_fib->header.XferState |= cpu_to_le32(HighPriority);
qid = AdapHighCmdQueue;
} else {
hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
qid = AdapNormCmdQueue;
}
q = &dev->queues->queue[qid];
hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

if(wait)
spin_lock_irqsave(&fibptr->event_lock, flags);
if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
return -EWOULDBLOCK;
dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
dprintk((KERN_DEBUG "Fib contents:.\n"));
dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.

@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
fibptr->callback = callback;
fibptr->callback_data = callback_data;
}
FIB_COUNTER_INCREMENT(aac_config.FibsSent);
list_add_tail(&fibptr->queue, &q->pendingq);
q->numpending++;

fibptr->done = 0;
fibptr->flags = 0;

if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
return -EWOULDBLOCK;
FIB_COUNTER_INCREMENT(aac_config.FibsSent);

dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
dprintk((KERN_DEBUG "Fib contents:.\n"));
dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));

q = &dev->queues->queue[AdapNormCmdQueue];

if(wait)
spin_lock_irqsave(&fibptr->event_lock, flags);
spin_lock_irqsave(q->lock, qflags);
aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

list_add_tail(&fibptr->queue, &q->pendingq);
q->numpending++;
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
/*
* If the caller wanted us to wait for response wait now.
*/

if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
down(&fibptr->event_wait);
/* Only set for first known interruptable command */
if (wait < 0) {
/*
* *VERY* Dangerous to time out a command, the
* assumption is made that we have no hope of
* functioning because an interrupt routing or other
* hardware failure has occurred.
*/
unsigned long count = 36000000L; /* 3 minutes */
unsigned long qflags;
while (down_trylock(&fibptr->event_wait)) {
if (--count == 0) {
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
list_del(&fibptr->queue);
spin_unlock_irqrestore(q->lock, qflags);
if (wait == -1) {
printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
"Usually a result of a PCI interrupt routing problem;\n"
"update mother board BIOS or consider utilizing one of\n"
"the SAFE mode kernel options (acpi, apic etc)\n");
}
return -ETIMEDOUT;
}
udelay(5);
}
} else
down(&fibptr->event_wait);
if(fibptr->done == 0)
BUG();

@@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
case HostNormCmdQueue:
notify = HostNormCmdNotFull;
break;
case HostHighCmdQueue:
notify = HostHighCmdNotFull;
break;
case HostNormRespQueue:
notify = HostNormRespNotFull;
break;
case HostHighRespQueue:
notify = HostHighRespNotFull;
break;
default:
BUG();
return;

@@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
{
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_dev * dev = fibptr->dev;
struct aac_queue * q;
unsigned long nointr = 0;
if (hw_fib->header.XferState == 0)
unsigned long qflags;

if (hw_fib->header.XferState == 0) {
return 0;
}
/*
* If we plan to do anything check the structure type first.
*/

@@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
u32 index;
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
u32 index;
if (size)
{
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
}
if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
return -EWOULDBLOCK;
}
if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
}
} else if (hw_fib->header.XferState &
cpu_to_le32(NormalPriority)) {
u32 index;

if (size) {
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
}
if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
return -EWOULDBLOCK;
if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
{
}
if (size) {
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
}
q = &dev->queues->queue[AdapNormRespQueue];
spin_lock_irqsave(q->lock, qflags);
aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & (int)aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormRespQueue);
}
else
{

@@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val)
memset(cp, 0, 256);
}


/**
* aac_handle_aif - Handle a message from the firmware
* @dev: Which adapter this fib is from
* @fibptr: Pointer to fibptr from adapter
*
* This routine handles a driver notify fib from the adapter and
* dispatches it to the appropriate routine for handling.
*/

static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
int busy;
u32 container;
struct scsi_device *device;
enum {
NOTHING,
DELETE,
ADD,
CHANGE
} device_config_needed;

/* Sniff for container changes */

if (!dev)
return;
container = (u32)-1;

/*
* We have set this up to try and minimize the number of
* re-configures that take place. As a result of this when
* certain AIF's come in we will set a flag waiting for another
* type of AIF before setting the re-config flag.
*/
switch (le32_to_cpu(aifcmd->command)) {
case AifCmdDriverNotify:
switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
/*
* Morph or Expand complete
*/
case AifDenMorphComplete:
case AifDenVolumeExtendComplete:
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;

/*
* Find the Scsi_Device associated with the SCSI
* address. Make sure we have the right array, and if
* so set the flag to initiate a new re-config once we
* see an AifEnConfigChange AIF come through.
*/

if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
device = scsi_device_lookup(dev->scsi_host_ptr,
CONTAINER_TO_CHANNEL(container),
CONTAINER_TO_ID(container),
CONTAINER_TO_LUN(container));
if (device) {
dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
scsi_device_put(device);
}
}
}

/*
* If we are waiting on something and this happens to be
* that thing then set the re-configure flag.
*/
if (container != (u32)-1) {
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
dev->fsa_dev[container].config_waiting_on = 0;
}
break;

case AifCmdEventNotify:
switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
/*
* Add an Array.
*/
case AifEnAddContainer:
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
dev->fsa_dev[container].config_needed = ADD;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
break;

/*
* Delete an Array.
*/
case AifEnDeleteContainer:
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
dev->fsa_dev[container].config_needed = DELETE;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
break;

/*
* Container change detected. If we currently are not
* waiting on something else, setup to wait on a Config Change.
*/
case AifEnContainerChange:
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on)
break;
dev->fsa_dev[container].config_needed = CHANGE;
dev->fsa_dev[container].config_waiting_on =
AifEnConfigChange;
break;

case AifEnConfigChange:
break;

}

/*
* If we are waiting on something and this happens to be
* that thing then set the re-configure flag.
*/
if (container != (u32)-1) {
if (container >= dev->maximum_num_containers)
break;
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
dev->fsa_dev[container].config_waiting_on = 0;
} else for (container = 0;
container < dev->maximum_num_containers; ++container) {
if (dev->fsa_dev[container].config_waiting_on ==
le32_to_cpu(*(u32 *)aifcmd->data))
dev->fsa_dev[container].config_waiting_on = 0;
}
break;

case AifCmdJobProgress:
/*
* These are job progress AIF's. When a Clear is being
* done on a container it is initially created then hidden from
* the OS. When the clear completes we don't get a config
* change so we monitor the job status complete on a clear then
* wait for a container change.
*/

if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
&& ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
|| (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
for (container = 0;
container < dev->maximum_num_containers;
++container) {
/*
* Stomp on all config sequencing for all
* containers?
*/
dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange;
dev->fsa_dev[container].config_needed = ADD;
}
}
if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
&& (((u32 *)aifcmd->data)[6] == 0)
&& (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
for (container = 0;
container < dev->maximum_num_containers;
++container) {
/*
* Stomp on all config sequencing for all
* containers?
*/
dev->fsa_dev[container].config_waiting_on =
AifEnContainerChange;
dev->fsa_dev[container].config_needed = DELETE;
}
}
break;
}

device_config_needed = NOTHING;
for (container = 0; container < dev->maximum_num_containers;
++container) {
if ((dev->fsa_dev[container].config_waiting_on == 0)
&& (dev->fsa_dev[container].config_needed != NOTHING)) {
device_config_needed =
dev->fsa_dev[container].config_needed;
dev->fsa_dev[container].config_needed = NOTHING;
break;
}
}
if (device_config_needed == NOTHING)
return;

/*
* If we decided that a re-configuration needs to be done,
* schedule it here on the way out the door, please close the door
* behind you.
*/

busy = 0;


/*
* Find the Scsi_Device associated with the SCSI address,
* and mark it as changed, invalidating the cache. This deals
* with changes to existing device IDs.
*/

if (!dev || !dev->scsi_host_ptr)
return;
/*
* force reload of disk info via probe_container
*/
if ((device_config_needed == CHANGE)
&& (dev->fsa_dev[container].valid == 1))
dev->fsa_dev[container].valid = 2;
if ((device_config_needed == CHANGE) ||
(device_config_needed == ADD))
probe_container(dev, container);
device = scsi_device_lookup(dev->scsi_host_ptr,
CONTAINER_TO_CHANNEL(container),
CONTAINER_TO_ID(container),
CONTAINER_TO_LUN(container));
if (device) {
switch (device_config_needed) {
case DELETE:
scsi_remove_device(device);
break;
case CHANGE:
if (!dev->fsa_dev[container].valid) {
scsi_remove_device(device);
break;
}
scsi_rescan_device(&device->sdev_gendev);

default:
break;
}
scsi_device_put(device);
}
if (device_config_needed == ADD) {
scsi_add_device(dev->scsi_host_ptr,
CONTAINER_TO_CHANNEL(container),
CONTAINER_TO_ID(container),
CONTAINER_TO_LUN(container));
}

}

/**
* aac_command_thread - command processing thread
* @dev: Adapter to monitor

@@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev)
{
struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib;
struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);

@@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev)
* Let the DPC know it has a place to send the AIF's to.
*/
dev->aif_thread = 1;
add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
dprintk ((KERN_INFO "aac_command_thread start\n"));
while(1)
{
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;

set_current_state(TASK_RUNNING);

entry = queues->queue[HostNormCmdQueue].cmdq.next;

entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);

spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);

spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct fib, fiblink);
/*
* We will process the FIB here or pass it to a

@@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev)
aifcmd = (struct aac_aifcmd *) hw_fib->data;
if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
/* Handle Driver Notify Events */
aac_handle_aif(dev, fib);
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, (u16)sizeof(u32));
} else {

@@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev)

u32 time_now, time_last;
unsigned long flagv;

unsigned num;
struct hw_fib ** hw_fib_pool, ** hw_fib_p;
struct fib ** fib_pool, ** fib_p;

/* Sniff events */
if ((aifcmd->command ==
cpu_to_le32(AifCmdEventNotify)) ||
(aifcmd->command ==
cpu_to_le32(AifCmdJobProgress))) {
aac_handle_aif(dev, fib);
}

time_now = jiffies/HZ;

/*
* Warning: no sleep allowed while
* holding spinlock. We take the estimate
* and pre-allocate a set of fibs outside the
* lock.
*/
num = le32_to_cpu(dev->init->AdapterFibsSize)
/ sizeof(struct hw_fib); /* some extra */
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
while (entry != &dev->fib_list) {
entry = entry->next;
++num;
}
spin_unlock_irqrestore(&dev->fib_lock, flagv);
hw_fib_pool = NULL;
fib_pool = NULL;
if (num
&& ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
&& ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (hw_fib_p < &hw_fib_pool[num]) {
if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
--hw_fib_p;
break;
}
if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
kfree(*(--hw_fib_p));
break;
}
}
if ((num = hw_fib_p - hw_fib_pool) == 0) {
kfree(fib_pool);
fib_pool = NULL;
kfree(hw_fib_pool);
hw_fib_pool = NULL;
}
} else if (hw_fib_pool) {
kfree(hw_fib_pool);
hw_fib_pool = NULL;
}
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
/*

@@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev)
* fib, and then set the event to wake up the
* thread that is waiting for it.
*/
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (entry != &dev->fib_list) {
/*
* Extract the fibctx

@@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev)
* Warning: no sleep allowed while
* holding spinlock
*/
hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
if (newfib && hw_newfib) {
if (hw_fib_p < &hw_fib_pool[num]) {
hw_newfib = *hw_fib_p;
*(hw_fib_p++) = NULL;
newfib = *fib_p;
*(fib_p++) = NULL;
/*
* Make the copy of the FIB
*/

@@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev)
fibctx->count++;
/*
* Set the event to wake up the
* thread that will waiting.
* thread that is waiting.
*/
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
if(newfib)
kfree(newfib);
if(hw_newfib)
kfree(hw_newfib);
}
entry = entry->next;
}

@@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev)
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
/* Free up the remaining resources */
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
while (hw_fib_p < &hw_fib_pool[num]) {
if (*hw_fib_p)
kfree(*hw_fib_p);
if (*fib_p)
kfree(*fib_p);
++fib_p;
++hw_fib_p;
}
if (hw_fib_pool)
kfree(hw_fib_pool);
if (fib_pool)
kfree(fib_pool);
}
spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
kfree(fib);
spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
}
/*
* There are no more AIF's
*/
spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
schedule();

if(signal_pending(current))
break;
set_current_state(TASK_INTERRUPTIBLE);
}
remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
|
||||
if (dev->queues)
|
||||
remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
|
||||
dev->aif_thread = 0;
|
||||
complete_and_exit(&dev->aif_completion, 0);
|
||||
return 0;
|
||||
}
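
The hunks above pre-allocate the fib buffers before taking the spinlock because GFP_KERNEL allocations may sleep while the lock is held only around pointer manipulation. A minimal, hedged sketch of that shape follows; it is illustrative only, not driver code, and the names hand_out_buffers, struct consumer, consumer_list and consumer_lock are made up:

/* Illustrative only -- not the aacraid code.  kmalloc(GFP_KERNEL) may
 * sleep, so the pool is built with no lock held; inside the spinlock
 * buffers are handed out by pointer assignment only. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct consumer {
	struct list_head list;
	void *buf;
};

static LIST_HEAD(consumer_list);
static DEFINE_SPINLOCK(consumer_lock);

static void hand_out_buffers(unsigned int estimate, size_t bufsize)
{
	void **pool;
	unsigned int n = 0;
	unsigned long flags;
	struct consumer *c;

	pool = kcalloc(estimate, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return;
	while (n < estimate && (pool[n] = kmalloc(bufsize, GFP_KERNEL)))
		n++;				/* may sleep: no lock held here */

	spin_lock_irqsave(&consumer_lock, flags);
	list_for_each_entry(c, &consumer_list, list) {
		if (!n)
			break;
		if (!c->buf)
			c->buf = pool[--n];	/* pointer swap only, no allocation */
	}
	spin_unlock_irqrestore(&consumer_lock, flags);

	while (n)
		kfree(pool[--n]);		/* free whatever was not handed out */
	kfree(pool);
}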

@ -748,7 +748,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
		unique_id++;
	}

	if (pci_enable_device(pdev))
	error = pci_enable_device(pdev);
	if (error)
		goto out;

	if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||

@ -772,6 +773,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
	shost->irq = pdev->irq;
	shost->base = pci_resource_start(pdev, 0);
	shost->unique_id = unique_id;
	shost->max_cmd_len = 16;

	aac = (struct aac_dev *)shost->hostdata;
	aac->scsi_host_ptr = shost;

@ -799,7 +801,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
		goto out_free_fibs;

	aac->maximum_num_channels = aac_drivers[index].channels;
	aac_get_adapter_info(aac);
	error = aac_get_adapter_info(aac);
	if (error < 0)
		goto out_deinit;

	/*
	 * Lets override negotiations and drop the maximum SG limit to 34

@ -927,8 +931,8 @@ static int __init aac_init(void)
	printk(KERN_INFO "Adaptec %s driver (%s)\n",
	       AAC_DRIVERNAME, aac_driver_version);

	error = pci_module_init(&aac_pci_driver);
	if (error)
	error = pci_register_driver(&aac_pci_driver);
	if (error < 0)
		return error;

	aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
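
The probe and init hunks above check the return values of pci_enable_device(), aac_get_adapter_info() and pci_register_driver() and unwind on failure. A generic, hedged sketch of that pattern is shown below; it is not the aacraid driver, and "example" names and the placeholder PCI IDs are invented for illustration:

/* Minimal PCI driver skeleton (illustrative only): every step that can
 * fail is checked and unwound, mirroring the error-path changes above. */
#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int error;

	error = pci_enable_device(pdev);	/* may fail: propagate the error */
	if (error)
		return error;

	error = pci_request_regions(pdev, "example");
	if (error)
		goto out_disable;

	return 0;

out_disable:
	pci_disable_device(pdev);
	return error;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return pci_register_driver(&example_driver);	/* negative on failure */
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");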

@ -112,6 +112,9 @@ aic7770_remove(struct device *dev)
	struct ahc_softc *ahc = dev_get_drvdata(dev);
	u_long s;

	if (ahc->platform_data && ahc->platform_data->host)
		scsi_remove_host(ahc->platform_data->host);

	ahc_lock(ahc, &s);
	ahc_intr_enable(ahc, FALSE);
	ahc_unlock(ahc, &s);

@ -1192,11 +1192,6 @@ ahd_platform_free(struct ahd_softc *ahd)
	int i, j;

	if (ahd->platform_data != NULL) {
		if (ahd->platform_data->host != NULL) {
			scsi_remove_host(ahd->platform_data->host);
			scsi_host_put(ahd->platform_data->host);
		}

		/* destroy all of the device and target objects */
		for (i = 0; i < AHD_NUM_TARGETS; i++) {
			starget = ahd->platform_data->starget[i];

@ -1226,6 +1221,9 @@ ahd_platform_free(struct ahd_softc *ahd)
			release_mem_region(ahd->platform_data->mem_busaddr,
					   0x1000);
		}
		if (ahd->platform_data->host)
			scsi_host_put(ahd->platform_data->host);

		free(ahd->platform_data, M_DEVBUF);
	}
}

@ -95,6 +95,9 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
	struct ahd_softc *ahd = pci_get_drvdata(pdev);
	u_long s;

	if (ahd->platform_data && ahd->platform_data->host)
		scsi_remove_host(ahd->platform_data->host);

	ahd_lock(ahd, &s);
	ahd_intr_enable(ahd, FALSE);
	ahd_unlock(ahd, &s);

@ -1209,11 +1209,6 @@ ahc_platform_free(struct ahc_softc *ahc)
	int i, j;

	if (ahc->platform_data != NULL) {
		if (ahc->platform_data->host != NULL) {
			scsi_remove_host(ahc->platform_data->host);
			scsi_host_put(ahc->platform_data->host);
		}

		/* destroy all of the device and target objects */
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			starget = ahc->platform_data->starget[i];

@ -1242,6 +1237,9 @@ ahc_platform_free(struct ahc_softc *ahc)
					    0x1000);
		}

		if (ahc->platform_data->host)
			scsi_host_put(ahc->platform_data->host);

		free(ahc->platform_data, M_DEVBUF);
	}
}

@ -143,6 +143,9 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
	struct ahc_softc *ahc = pci_get_drvdata(pdev);
	u_long s;

	if (ahc->platform_data && ahc->platform_data->host)
		scsi_remove_host(ahc->platform_data->host);

	ahc_lock(ahc, &s);
	ahc_intr_enable(ahc, FALSE);
	ahc_unlock(ahc, &s);
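
The aic7xxx/aic79xx hunks above all make the same change: the Scsi_Host is unregistered from the midlayer at the top of the remove path, while the final scsi_host_put() is deferred until the platform data is freed. A generic sketch of that ordering is given below; example_softc, example_quiesce and example_pci_remove are invented names, not driver functions:

#include <linux/pci.h>
#include <scsi/scsi_host.h>

struct example_softc {
	struct Scsi_Host *host;
};

static void example_quiesce(struct example_softc *sc)
{
	/* stop interrupts / DMA; placeholder for the driver-specific part */
}

static void example_pci_remove(struct pci_dev *pdev)
{
	struct example_softc *sc = pci_get_drvdata(pdev);

	/* 1. Detach from the SCSI midlayer while the hardware still works,
	 *    so outstanding commands can be completed or errored out. */
	if (sc->host)
		scsi_remove_host(sc->host);

	/* 2. Now it is safe to quiesce and tear down the hardware. */
	example_quiesce(sc);

	/* 3. Drop the last host reference only once nothing uses it. */
	if (sc->host)
		scsi_host_put(sc->host);

	pci_disable_device(pdev);
}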

@ -176,6 +176,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
	transport_unregister_device(&shost->shost_gendev);
	class_device_unregister(&shost->shost_classdev);
	device_del(&shost->shost_gendev);
	scsi_proc_hostdir_rm(shost->hostt);
}
EXPORT_SYMBOL(scsi_remove_host);

@ -262,7 +263,6 @@ static void scsi_host_dev_release(struct device *dev)
	if (shost->work_q)
		destroy_workqueue(shost->work_q);

	scsi_proc_hostdir_rm(shost->hostt);
	scsi_destroy_command_freelist(shost);
	kfree(shost->shost_data);

@ -973,10 +973,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
	if ((phba->fc_flag & FC_FABRIC) ||
	    ((phba->fc_topology == TOPOLOGY_LOOP) &&
	     (phba->fc_flag & FC_PUBLIC_LOOP)))
		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn);
		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
	else
		/* fabric is local port if there is no F/FL_Port */
		node_name = wwn_to_u64(phba->fc_nodename.wwn);
		node_name = wwn_to_u64(phba->fc_nodename.u.wwn);

	spin_unlock_irq(shost->host_lock);

@ -1110,7 +1110,7 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
	/* Search the mapped list for this target ID */
	list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
		if (starget->id == ndlp->nlp_sid) {
			node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
			node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
			break;
		}
	}

@ -1131,7 +1131,7 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
	/* Search the mapped list for this target ID */
	list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
		if (starget->id == ndlp->nlp_sid) {
			port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
			port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
			break;
		}
	}

@ -1019,8 +1019,8 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn);
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (ndlp->nlp_type & NLP_FCP_TARGET)

@ -280,9 +280,9 @@ struct lpfc_name {
#define NAME_CCITT_GR_TYPE 0xE
		uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
		uint8_t IEEE[6]; /* FC IEEE address */
	};
	} s;
	uint8_t wwn[8];
	};
	} u;
};

struct csp {

@ -285,7 +285,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
		outptr = &phba->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);

@ -1523,8 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
	 * Must done after lpfc_sli_hba_setup()
	 */

	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn);
	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn);
	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
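
The lpfc hunks above give the previously anonymous union members explicit names (u and s), so the same eight bytes can be addressed either as raw WWN bytes or as structured name fields. A compact, self-contained illustration of the idea follows; the layout is simplified, the types and helper are hypothetical, and example_wwn_to_u64() is only a local re-implementation of what the kernel's wwn_to_u64() helper does:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the lpfc_name layout: the same storage viewed
 * either as structured name fields or as the raw 8-byte WWN. */
struct example_name {
	union {
		struct {
			uint8_t name_type;	/* simplified: real code packs bitfields here */
			uint8_t ieee_ext;
			uint8_t ieee[6];	/* IEEE address */
		} s;
		uint8_t wwn[8];
	} u;
};

/* Same packing idea as the kernel's wwn_to_u64(): big-endian byte order. */
static uint64_t example_wwn_to_u64(const uint8_t *wwn)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	struct example_name n = { .u.wwn = { 0x20, 0x00, 0x00, 0xe0, 0x8b, 0x01, 0x02, 0x03 } };

	printf("node name 0x%016llx\n",
	       (unsigned long long)example_wwn_to_u64(n.u.wwn));
	return 0;
}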

@ -621,8 +621,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
	if(islogical) {
		switch (cmd->cmnd[0]) {
		case TEST_UNIT_READY:
			memset(cmd->request_buffer, 0, cmd->request_bufflen);

#if MEGA_HAVE_CLUSTERING
			/*
			 * Do we support clustering and is the support enabled

@ -652,11 +650,28 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
			return NULL;
#endif

		case MODE_SENSE:
		case MODE_SENSE: {
			char *buf;

			if (cmd->use_sg) {
				struct scatterlist *sg;

				sg = (struct scatterlist *)cmd->request_buffer;
				buf = kmap_atomic(sg->page, KM_IRQ0) +
					sg->offset;
			} else
				buf = cmd->request_buffer;
			memset(cmd->request_buffer, 0, cmd->cmnd[4]);
			if (cmd->use_sg) {
				struct scatterlist *sg;

				sg = (struct scatterlist *)cmd->request_buffer;
				kunmap_atomic(buf - sg->offset, KM_IRQ0);
			}
			cmd->result = (DID_OK << 16);
			cmd->scsi_done(cmd);
			return NULL;
		}

		case READ_CAPACITY:
		case INQUIRY:
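
The MODE_SENSE hunk above maps the command buffer before the CPU touches it, because with use_sg the buffer is a scatterlist entry that may live in highmem. A hedged sketch of that pattern, written against the same 2.6-era kmap_atomic(page, KM_IRQ0) interface the diff uses, is shown below; zero_cmd_buffer() is a made-up helper, not a driver or kernel function:

#include <linux/highmem.h>
#include <linux/string.h>
#include <asm/scatterlist.h>

static void zero_cmd_buffer(void *request_buffer, int use_sg, size_t len)
{
	char *buf;

	if (use_sg) {
		struct scatterlist *sg = request_buffer;

		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	} else
		buf = request_buffer;

	memset(buf, 0, len);		/* touch the mapped address, not the sg */

	if (use_sg) {
		struct scatterlist *sg = request_buffer;

		kunmap_atomic(buf - sg->offset, KM_IRQ0);
	}
}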

@ -1685,14 +1700,23 @@ mega_rundoneq (adapter_t *adapter)
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
	unsigned long length;

	switch( scb->dma_type ) {

	case MEGA_DMA_TYPE_NONE:
		break;

	case MEGA_BULK_DATA:
		if (scb->cmd->use_sg == 0)
			length = scb->cmd->request_bufflen;
		else {
			struct scatterlist *sgl =
				(struct scatterlist *)scb->cmd->request_buffer;
			length = sgl->length;
		}
		pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
			scb->cmd->request_bufflen, scb->dma_direction);
			length, scb->dma_direction);
		break;

	case MEGA_SGLIST:

@ -1741,6 +1765,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
	struct scatterlist *sgl;
	struct page *page;
	unsigned long offset;
	unsigned int length;
	Scsi_Cmnd *cmd;
	int sgcnt;
	int idx;

@ -1748,14 +1773,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
	cmd = scb->cmd;

	/* Scatter-gather not used */
	if( !cmd->use_sg ) {
	if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
				 !adapter->has_64bit_addr)) {

		page = virt_to_page(cmd->request_buffer);
		offset = offset_in_page(cmd->request_buffer);
		if (cmd->use_sg == 0) {
			page = virt_to_page(cmd->request_buffer);
			offset = offset_in_page(cmd->request_buffer);
			length = cmd->request_bufflen;
		} else {
			sgl = (struct scatterlist *)cmd->request_buffer;
			page = sgl->page;
			offset = sgl->offset;
			length = sgl->length;
		}

		scb->dma_h_bulkdata = pci_map_page(adapter->dev,
				page, offset,
				cmd->request_bufflen,
				length,
				scb->dma_direction);
		scb->dma_type = MEGA_BULK_DATA;

@ -1765,14 +1799,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
		 */
		if( adapter->has_64bit_addr ) {
			scb->sgl64[0].address = scb->dma_h_bulkdata;
			scb->sgl64[0].length = cmd->request_bufflen;
			scb->sgl64[0].length = length;
			*buf = (u32)scb->sgl_dma_addr;
			*len = (u32)cmd->request_bufflen;
			*len = (u32)length;
			return 1;
		}
		else {
			*buf = (u32)scb->dma_h_bulkdata;
			*len = (u32)cmd->request_bufflen;
			*len = (u32)length;
		}
		return 0;
	}

@ -1791,27 +1825,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)

	if( sgcnt > adapter->sglen ) BUG();

	*len = 0;

	for( idx = 0; idx < sgcnt; idx++, sgl++ ) {

		if( adapter->has_64bit_addr ) {
			scb->sgl64[idx].address = sg_dma_address(sgl);
			scb->sgl64[idx].length = sg_dma_len(sgl);
			*len += scb->sgl64[idx].length = sg_dma_len(sgl);
		}
		else {
			scb->sgl[idx].address = sg_dma_address(sgl);
			scb->sgl[idx].length = sg_dma_len(sgl);
			*len += scb->sgl[idx].length = sg_dma_len(sgl);
		}
	}

	/* Reset pointer and length fields */
	*buf = scb->sgl_dma_addr;

	/*
	 * For passthru command, dataxferlen must be set, even for commands
	 * with a sg list
	 */
	*len = (u32)cmd->request_bufflen;

	/* Return count of SG requests */
	return sgcnt;
}
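
In the mega_build_sglist() hunk above, the total transfer length handed back in *len is now accumulated from the DMA-mapped length of each scatterlist entry rather than taken from request_bufflen. A small illustrative sketch of that accumulation is below; struct hw_sge and fill_hw_sglist() are made-up names standing in for the controller-specific SG element:

#include <linux/types.h>
#include <asm/scatterlist.h>

struct hw_sge {
	u64 address;
	u32 length;
};

/* Fill a hardware SG list from an already DMA-mapped scatterlist and
 * return the total number of mapped bytes. */
static u32 fill_hw_sglist(struct hw_sge *hw, struct scatterlist *sgl, int sgcnt)
{
	u32 total = 0;
	int i;

	for (i = 0; i < sgcnt; i++, sgl++) {
		hw[i].address = sg_dma_address(sgl);
		hw[i].length  = sg_dma_len(sgl);
		total += hw[i].length;		/* running total of mapped bytes */
	}
	return total;
}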

@ -76,3 +76,12 @@ config MEGARAID_LEGACY
	  To compile this driver as a module, choose M here: the
	  module will be called megaraid
endif

config MEGARAID_SAS
	tristate "LSI Logic MegaRAID SAS RAID Module"
	depends on PCI && SCSI
	help
	  Module for LSI Logic's SAS based RAID controllers.
	  To compile this driver as a module, choose 'm' here.
	  Module will be called megaraid_sas

@ -1,2 +1,3 @@
obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o

File diff suppressed because it is too large
File diff suppressed because it is too large

@ -330,6 +330,8 @@ qla2x00_update_login_fcport(scsi_qla_host_t *ha, struct mbx_entry *mbxstat,
	fcport->flags &= ~FCF_FAILOVER_NEEDED;
	fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
	atomic_set(&fcport->state, FCS_ONLINE);
	if (fcport->rport)
		fc_remote_port_unblock(fcport->rport);
}

@ -587,6 +587,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	return 0;
}

@ -771,6 +772,15 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
	return SCSI_SCAN_LUN_PRESENT;
}

static inline void scsi_destroy_sdev(struct scsi_device *sdev)
{
	if (sdev->host->hostt->slave_destroy)
		sdev->host->hostt->slave_destroy(sdev);
	transport_destroy_device(&sdev->sdev_gendev);
	put_device(&sdev->sdev_gendev);
}

/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget: pointer to target device structure

@ -803,9 +813,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
	 * The rescan flag is used as an optimization, the first scan of a
	 * host adapter calls into here with rescan == 0.
	 */
	if (rescan) {
		sdev = scsi_device_lookup_by_target(starget, lun);
		if (sdev) {
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan || sdev->sdev_state != SDEV_CREATED) {
			SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
				"scsi scan: device exists on %s\n",
				sdev->sdev_gendev.bus_id));

@ -820,9 +830,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
				sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
	}

	sdev = scsi_alloc_sdev(starget, lun, hostdata);
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

@ -877,12 +887,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
			res = SCSI_SCAN_NO_RESPONSE;
		}
	}
	} else {
		if (sdev->host->hostt->slave_destroy)
			sdev->host->hostt->slave_destroy(sdev);
		transport_destroy_device(&sdev->sdev_gendev);
		put_device(&sdev->sdev_gendev);
	}
	} else
		scsi_destroy_sdev(sdev);
 out:
	return res;
}

@ -1054,7 +1060,7 @@ EXPORT_SYMBOL(int_to_scsilun);
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: no report lun scan, or not configured
 **/
static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
				int rescan)
{
	char devname[64];

@ -1067,7 +1073,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
	struct scsi_lun *lunp, *lun_data;
	u8 *data;
	struct scsi_sense_hdr sshdr;
	struct scsi_target *starget = scsi_target(sdev);
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.

@ -1075,15 +1082,23 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
	 * support more than 8 LUNs.
	 */
	if ((bflags & BLIST_NOREPORTLUN) ||
	    sdev->scsi_level < SCSI_2 ||
	    (sdev->scsi_level < SCSI_3 &&
	     (!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) )
	    starget->scsi_level < SCSI_2 ||
	    (starget->scsi_level < SCSI_3 &&
	     (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) )
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;

	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev))
			return 0;
	}

	sprintf(devname, "host %d channel %d id %d",
		sdev->host->host_no, sdev->channel, sdev->id);
		shost->host_no, sdev->channel, sdev->id);

	/*
	 * Allocate enough to hold the header (the same size as one scsi_lun)

@ -1098,8 +1113,10 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
	length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
	lun_data = kmalloc(length, GFP_ATOMIC |
			   (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
	if (!lun_data)
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __FUNCTION__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

@ -1201,10 +1218,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
			for (i = 0; i < sizeof(struct scsi_lun); i++)
				printk("%02x", data[i]);
			printk(" has a LUN larger than currently supported.\n");
		} else if (lun == 0) {
			/*
			 * LUN 0 has already been scanned.
			 */
		} else if (lun > sdev->host->max_lun) {
			printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
			       " than allowed by the host adapter\n",

@ -1227,13 +1240,13 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
	}

	kfree(lun_data);
	return 0;

 out:
	/*
	 * We are out of memory, don't try scanning any further.
	 */
	printk(ALLOC_FAILURE_MSG, __FUNCTION__);
	scsi_device_put(sdev);
	if (sdev->sdev_state == SDEV_CREATED)
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		scsi_destroy_sdev(sdev);
	return 0;
}
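
The scsi_report_lun_scan() hunks above size the response buffer as (max_scsi_report_luns + 1) entries of sizeof(struct scsi_lun) and then issue a REPORT LUNS command. A rough, self-contained illustration of the CDB that corresponds to such a buffer is shown below; build_report_luns_cdb() is a made-up helper, not a kernel API:

#include <stdint.h>
#include <string.h>

#define REPORT_LUNS_CMD 0xa0	/* SCSI REPORT LUNS opcode */

static void build_report_luns_cdb(uint8_t cdb[12], unsigned int max_luns)
{
	/* one 8-byte header slot plus one 8-byte entry per LUN */
	uint32_t alloc_len = (max_luns + 1) * 8;

	memset(cdb, 0, 12);
	cdb[0] = REPORT_LUNS_CMD;
	cdb[6] = (alloc_len >> 24) & 0xff;	/* allocation length, big endian */
	cdb[7] = (alloc_len >> 16) & 0xff;
	cdb[8] = (alloc_len >> 8) & 0xff;
	cdb[9] = alloc_len & 0xff;
}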

@ -1299,7 +1312,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
	struct Scsi_Host *shost = dev_to_shost(parent);
	int bflags = 0;
	int res;
	struct scsi_device *sdev = NULL;
	struct scsi_target *starget;

	if (shost->this_id == id)

@ -1325,27 +1337,16 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
	 * Scan LUN 0, if there is some response, scan further. Ideally, we
	 * would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, &sdev, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (scsi_report_lun_scan(sdev, bflags, rescan) != 0)
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
				res, sdev->scsi_level, rescan);
	} else if (res == SCSI_SCAN_TARGET_PRESENT) {
		/*
		 * There's a target here, but lun 0 is offline so we
		 * can't use the report_lun scan. Fall back to a
		 * sequential lun scan with a bflags of SPARSELUN and
		 * a default scsi level of SCSI_2
		 */
		scsi_sequential_lun_scan(starget, BLIST_SPARSELUN,
				SCSI_SCAN_TARGET_PRESENT, SCSI_2, rescan);
				res, starget->scsi_level, rescan);
	}
	if (sdev)
		scsi_device_put(sdev);

 out_reap:
	/* now determine if the target has any children at all

@ -1542,10 +1543,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
{
	BUG_ON(sdev->id != sdev->host->this_id);

	if (sdev->host->hostt->slave_destroy)
		sdev->host->hostt->slave_destroy(sdev);
	transport_destroy_device(&sdev->sdev_gendev);
	put_device(&sdev->sdev_gendev);
	scsi_destroy_sdev(sdev);
}
EXPORT_SYMBOL(scsi_free_host_dev);

@ -628,17 +628,16 @@ sas_rphy_delete(struct sas_rphy *rphy)
	struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);

	transport_destroy_device(&rphy->dev);
	scsi_remove_target(dev);

	scsi_remove_target(&rphy->dev);
	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);

	spin_lock(&sas_host->lock);
	list_del(&rphy->list);
	spin_unlock(&sas_host->lock);

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);
	put_device(&parent->dev);
}
EXPORT_SYMBOL(sas_rphy_delete);
|
|||
#define PCI_DEVICE_ID_LSI_61C102 0x0901
|
||||
#define PCI_DEVICE_ID_LSI_63C815 0x1000
|
||||
#define PCI_DEVICE_ID_LSI_SAS1064 0x0050
|
||||
#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411
|
||||
#define PCI_DEVICE_ID_LSI_SAS1066 0x005E
|
||||
#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
|
||||
#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C
|
||||
|
@ -560,6 +561,7 @@
|
|||
#define PCI_VENDOR_ID_DELL 0x1028
|
||||
#define PCI_DEVICE_ID_DELL_RACIII 0x0008
|
||||
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
|
||||
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
|
||||
|
||||
#define PCI_VENDOR_ID_MATROX 0x102B
|
||||
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
|
||||
|
|
|
@ -163,6 +163,7 @@ struct scsi_target {
|
|||
unsigned int id; /* target id ... replace
|
||||
* scsi_device.id eventually */
|
||||
unsigned long create:1; /* signal that it needs to be added */
|
||||
char scsi_level;
|
||||
void *hostdata; /* available to low-level driver */
|
||||
unsigned long starget_data[0]; /* for the transport */
|
||||
/* starget_data must be the last element!!!! */
|
||||
|
|