Merge remote-tracking branch 'mkp-scsi/4.11/scsi-fixes' into fixes
commit ca4a213db0
@@ -1253,20 +1253,6 @@ config SCSI_LPFC_DEBUG_FS
 	  This makes debugging information from the lpfc driver
 	  available via the debugfs filesystem.
 
-config LPFC_NVME_INITIATOR
-	bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
-	depends on SCSI_LPFC && NVME_FC
-	---help---
-	  This enables NVME Initiator support in the Emulex lpfc driver.
-
-config LPFC_NVME_TARGET
-	bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
-	depends on SCSI_LPFC && NVME_TARGET_FC
-	---help---
-	  This enables NVME Target support in the Emulex lpfc driver.
-	  Target enablement must still be enabled on a per adapter
-	  basis by module parameters.
-
 config SCSI_SIM710
 	tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
 	depends on (EISA || MCA) && SCSI
@@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
 	/* fill_cmd can't fail here, no data buffer to map. */
 	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
 			scsi3addr, TYPE_MSG);
-	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
 	if (rc) {
 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
 		goto out;
@@ -3714,7 +3714,7 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
 					unsigned char scsi3addr[])
 {
 	struct CommandList *c;
@@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 					DEFAULT_TIMEOUT);
 	if (rc) {
 		cmd_free(h, c);
-		return 0;
+		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
 	}
 	sense = c->err_info->SenseInfo;
 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 	cmd_status = c->err_info->CommandStatus;
 	scsi_status = c->err_info->ScsiStatus;
 	cmd_free(h, c);
-	/* Is the volume 'not ready'? */
-	if (cmd_status != CMD_TARGET_STATUS ||
-		scsi_status != SAM_STAT_CHECK_CONDITION ||
-		sense_key != NOT_READY ||
-		asc != ASC_LUN_NOT_READY) {
-		return 0;
-	}
 
 	/* Determine the reason for not ready state */
 	ldstat = hpsa_get_volume_status(h, scsi3addr);
 
 	/* Keep volume offline in certain cases: */
 	switch (ldstat) {
+	case HPSA_LV_FAILED:
 	case HPSA_LV_UNDERGOING_ERASE:
 	case HPSA_LV_NOT_AVAILABLE:
 	case HPSA_LV_UNDERGOING_RPI:
@@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
 	default:
 		break;
 	}
-	return 0;
+	return HPSA_LV_OK;
 }
 
 /*
@@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	/* Do an inquiry to the device to see what it is. */
 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
-		/* Inquiry failed (msg printed already) */
 		dev_err(&h->pdev->dev,
-			"hpsa_update_device_info: inquiry failed\n");
-		rc = -EIO;
+			"%s: inquiry failed, device will be skipped.\n",
+			__func__);
+		rc = HPSA_INQUIRY_FAILED;
 		goto bail_out;
 	}
 
@@ -3885,15 +3879,19 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 	if ((this_device->devtype == TYPE_DISK ||
 		this_device->devtype == TYPE_ZBC) &&
 		is_logical_dev_addr_mode(scsi3addr)) {
-		int volume_offline;
+		unsigned char volume_offline;
 
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 		volume_offline = hpsa_volume_offline(h, scsi3addr);
-		if (volume_offline < 0 || volume_offline > 0xff)
-			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
-		this_device->volume_offline = volume_offline & 0xff;
+		if (volume_offline == HPSA_LV_FAILED) {
+			rc = HPSA_LV_FAILED;
+			dev_err(&h->pdev->dev,
+				"%s: LV failed, device will be skipped.\n",
+				__func__);
+			goto bail_out;
+		}
 	} else {
 		this_device->raid_level = RAID_UNKNOWN;
 		this_device->offload_config = 0;
@@ -4379,8 +4377,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 			goto out;
 		}
 		if (rc) {
-			dev_warn(&h->pdev->dev,
-				"Inquiry failed, skipping device.\n");
+			h->drv_req_rescan = 1;
 			continue;
 		}
 
@@ -5558,7 +5555,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
 
 	spin_lock_irqsave(&h->scan_lock, flags);
 	h->scan_finished = 1;
-	wake_up_all(&h->scan_wait_queue);
+	wake_up(&h->scan_wait_queue);
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 }
 
@@ -5576,11 +5573,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 	if (unlikely(lockup_detected(h)))
 		return hpsa_scan_complete(h);
 
+	/*
+	 * If a scan is already waiting to run, no need to add another
+	 */
+	spin_lock_irqsave(&h->scan_lock, flags);
+	if (h->scan_waiting) {
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&h->scan_lock, flags);
+
 	/* wait until any scan already in progress is finished. */
 	while (1) {
 		spin_lock_irqsave(&h->scan_lock, flags);
 		if (h->scan_finished)
 			break;
+		h->scan_waiting = 1;
 		spin_unlock_irqrestore(&h->scan_lock, flags);
 		wait_event(h->scan_wait_queue, h->scan_finished);
 		/* Note: We don't need to worry about a race between this
@@ -5590,6 +5599,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 		 */
 	}
 	h->scan_finished = 0; /* mark scan as in progress */
+	h->scan_waiting = 0;
 	spin_unlock_irqrestore(&h->scan_lock, flags);
 
 	if (unlikely(lockup_detected(h)))
@@ -8792,6 +8802,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	init_waitqueue_head(&h->event_sync_wait_queue);
 	mutex_init(&h->reset_mutex);
 	h->scan_finished = 1; /* no scan currently in progress */
+	h->scan_waiting = 0;
 
 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
@@ -201,6 +201,7 @@ struct ctlr_info {
 	dma_addr_t		errinfo_pool_dhandle;
 	unsigned long		*cmd_pool_bits;
 	int			scan_finished;
+	u8			scan_waiting : 1;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;
 
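The hpsa hunks above add a one-bit scan_waiting flag so that at most one rescan can queue up behind the scan already in flight; any further requests return immediately, which is also why hpsa_scan_complete() can switch from wake_up_all() to wake_up(). Below is a minimal userspace sketch of that coalescing pattern using POSIX threads; the struct and function names are illustrative stand-ins, not the driver's API.

/* Minimal userspace analogue of the scan-coalescing pattern added above:
 * one scan may run, one may wait, anything beyond that returns at once.
 * Names are illustrative only, not the hpsa driver's API. */
#include <pthread.h>
#include <stdio.h>

struct ctl {
	pthread_mutex_t lock;		/* plays the role of scan_lock */
	pthread_cond_t  wait_queue;	/* plays the role of scan_wait_queue */
	int scan_finished;		/* 1: no scan currently in progress */
	int scan_waiting;		/* 1: a rescan is already queued */
};

static void scan_start(struct ctl *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->scan_waiting) {			/* someone already queued one */
		pthread_mutex_unlock(&c->lock);
		return;
	}
	while (!c->scan_finished) {		/* wait for the scan in flight */
		c->scan_waiting = 1;
		pthread_cond_wait(&c->wait_queue, &c->lock);
	}
	c->scan_finished = 0;			/* mark our scan as in progress */
	c->scan_waiting = 0;
	pthread_mutex_unlock(&c->lock);

	printf("scanning...\n");		/* the actual rescan work */
}

static void scan_complete(struct ctl *c)
{
	pthread_mutex_lock(&c->lock);
	c->scan_finished = 1;
	pthread_cond_signal(&c->wait_queue);	/* at most one waiter, no broadcast needed */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct ctl c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait_queue = PTHREAD_COND_INITIALIZER,
		.scan_finished = 1,
	};

	scan_start(&c);		/* runs immediately, nothing is in flight */
	scan_complete(&c);
	return 0;
}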
@@ -156,6 +156,7 @@
 #define CFGTBL_BusType_Fibre2G	0x00000200l
 
 /* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED		0x02
 #define HPSA_VPD_SUPPORTED_PAGES	0x00
 #define HPSA_VPD_LV_DEVICE_ID		0x83
 #define HPSA_VPD_LV_DEVICE_GEOMETRY	0xC1
@@ -166,6 +167,7 @@
 /* Logical volume states */
 #define HPSA_VPD_LV_STATUS_UNSUPPORTED	0xff
 #define HPSA_LV_OK			0x0
+#define HPSA_LV_FAILED			0x01
 #define HPSA_LV_NOT_AVAILABLE		0x0b
 #define HPSA_LV_UNDERGOING_ERASE	0x0F
 #define HPSA_LV_UNDERGOING_RPI		0x12
@@ -3315,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
  *                    3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 3
+ * Supported values are [1,3]. Default value is 1
  */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
 	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
 	    "Define fc4 type to register with fabric.");
 
@@ -5891,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		/* Check to see if it matches any module parameter */
 		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
 			if (wwn == lpfc_enable_nvmet[i]) {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"6017 NVME Target %016llx\n",
 						wwn);
 				phba->nvmet_support = 1; /* a match */
+#else
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6021 Can't enable NVME Target."
+						" NVME_TARGET_FC infrastructure"
+						" is not in kernel\n");
+#endif
 			}
 		}
 	}
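The lpfc hunks replace the driver-private CONFIG_LPFC_NVME_* guards, whose Kconfig entries were deleted at the top of this series, with IS_ENABLED() checks on the core NVMe-over-FC options; IS_ENABLED() works inside #if as well as in ordinary C expressions. The fragment below is a self-contained toy version of that pattern: the simplified IS_ENABLED() is a stand-in for the real macro in <linux/kconfig.h> (which also recognises =m module symbols), and CONFIG_DEMO_NVME_TARGET is a made-up symbol for the demo only.

/* Toy illustration of gating code on a config symbol with an
 * IS_ENABLED()-style macro. Simplified stand-in for <linux/kconfig.h>;
 * CONFIG_DEMO_NVME_TARGET is hypothetical. */
#include <stdio.h>

#define CONFIG_DEMO_NVME_TARGET 1	/* remove this line to "disable" it */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)	/* 1 if defined to 1, else 0 */

int main(void)
{
#if (IS_ENABLED(CONFIG_DEMO_NVME_TARGET))
	puts("target path compiled in (the \"6017\" branch above)");
#else
	puts("target infrastructure not in kernel (the \"6021\" branch above)");
#endif
	/* unlike a bare #ifdef, the same macro also works in plain C expressions */
	return IS_ENABLED(CONFIG_DEMO_NVME_TARGET) ? 0 : 1;
}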
@@ -2149,7 +2149,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	/* localport is allocated from the stack, but the registration
 	 * call allocates heap memory as well as the private area.
 	 */
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 					 &vport->phba->pcidev->dev, &localport);
 #else
@@ -2190,7 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 void
 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 {
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2274,7 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
 int
 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	int ret = 0;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
@@ -2403,7 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 void
 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
 	int ret;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
@@ -671,7 +671,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
 					   NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
 
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
 					     &phba->pcidev->dev,
 					     &phba->targetport);
@@ -756,7 +756,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 void
 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 {
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_tgtport *tgtp;
 
 	if (phba->nvmet_support == 0)
@@ -788,7 +788,7 @@ static void
 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			   struct hbq_dmabuf *nvmebuf)
 {
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -891,7 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 			    struct rqb_dmabuf *nvmebuf,
 			    uint64_t isr_timestamp)
 {
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION		"07.701.16.00-rc1"
-#define MEGASAS_RELDATE		"February 2, 2017"
+#define MEGASAS_VERSION		"07.701.17.00-rc1"
+#define MEGASAS_RELDATE		"March 2, 2017"
 
 /*
  * Device IDs
@@ -1963,6 +1963,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
 	if (!mr_device_priv_data)
 		return -ENOMEM;
 	sdev->hostdata = mr_device_priv_data;
+
+	atomic_set(&mr_device_priv_data->r1_ldio_hint,
+		   instance->r1_ldio_hint_default);
 	return 0;
 }
 
@@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
 					 &instance->irq_context[j]);
 			/* Retry irq register for IO_APIC*/
 			instance->msix_vectors = 0;
-			if (is_probe)
+			if (is_probe) {
+				pci_free_irq_vectors(instance->pdev);
 				return megasas_setup_irqs_ioapic(instance);
-			else
+			} else {
 				return -1;
+			}
 		}
 	}
 	return 0;
@@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
 				MPI2_REPLY_POST_HOST_INDEX_OFFSET);
 	}
 
-	i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
-	if (i < 0)
-		goto fail_setup_irqs;
+	if (!instance->msix_vectors) {
+		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+		if (i < 0)
+			goto fail_setup_irqs;
+	}
 
 	dev_info(&instance->pdev->dev,
 		"firmware supports msix\t: (%d)", fw_msix_count);
@@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
 			cpu_sel = MR_RAID_CTX_CPUSEL_1;
 
 		if (is_stream_detected(rctx_g35) &&
-		    (raid->level == 5) &&
+		    ((raid->level == 5) || (raid->level == 6)) &&
 		    (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
 		    (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
 			cpu_sel = MR_RAID_CTX_CPUSEL_0;
@@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 			fp_possible = false;
 			atomic_dec(&instance->fw_outstanding);
 		} else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
-			atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+			(atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
 			fp_possible = false;
 			atomic_dec(&instance->fw_outstanding);
 			if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
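The one-line megaraid_sas change above matters because atomic_dec_if_positive() returns the decremented value rather than a success flag: when the hint counter is already 0 it returns -1, which is still truthy, so the old condition fired even though no fast-path hint was consumed. A small C11 sketch of that pitfall; dec_if_positive() is a hypothetical userspace stand-in for the kernel helper, and r1_ldio_hint is reused only as a variable name.

/* Why "if (x)" and "if (x > 0)" differ for a dec-if-positive helper.
 * dec_if_positive() is a hypothetical stand-in for the kernel's
 * atomic_dec_if_positive(): it returns old - 1, i.e. -1 when the
 * counter is already 0, and -1 still tests as true in C. */
#include <stdatomic.h>
#include <stdio.h>

static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);
	int dec;

	do {
		dec = old - 1;
		if (dec < 0)		/* would go negative: leave counter alone */
			return dec;	/* -1, which is truthy */
	} while (!atomic_compare_exchange_weak(v, &old, dec));

	return dec;
}

int main(void)
{
	atomic_int r1_ldio_hint = 0;	/* no hints left */

	if (dec_if_positive(&r1_ldio_hint))		/* old check: -1 is truthy */
		puts("old condition fires even with no hints left");

	if (dec_if_positive(&r1_ldio_hint) > 0)		/* new check */
		puts("never reached here");
	else
		puts("new condition correctly falls through");

	return 0;
}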
@@ -7642,7 +7642,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
 	if (kstrtoul(buf, 0, &value))
 		return -EINVAL;
 
-	if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+	if (value >= UFS_PM_LVL_MAX)
 		return -EINVAL;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
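The ufshcd hunk drops only the dead half of the range check: value comes from kstrtoul() and is therefore unsigned, and UFS_PM_LVL_0 is the first enumerator of the power-level enum, i.e. 0, so value < UFS_PM_LVL_0 can never be true. A minimal stand-alone illustration; the enum below is a made-up stand-in for the UFS one.

/* Why "(value < DEMO_PM_LVL_0)" is dead code: value is unsigned and the
 * first enumerator is 0. demo_pm_lvl is a stand-in, not the UFS enum. */
#include <stdio.h>

enum demo_pm_lvl { DEMO_PM_LVL_0 = 0, DEMO_PM_LVL_1, DEMO_PM_LVL_MAX };

static int store_pm_lvl(unsigned long value)
{
	/* the dropped "(value < DEMO_PM_LVL_0)" test is always false here,
	 * so the whole check reduces to the one the patch keeps */
	if (value >= DEMO_PM_LVL_MAX)
		return -1;	/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("%d %d\n", store_pm_lvl(1), store_pm_lvl(99));	/* prints: 0 -1 */
	return 0;
}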