SCSI fixes on 20171205
We have a bunch of fixes for aacraid, a set of coherency fixes that only
affect non-coherent platforms and one coccinelle detected null check
after use.

Signed-off-by: James E.J. Bottomley <jejb@linux.vnet.ibm.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABAgAGBQJaJs8oAAoJEAVr7HOZEZN4MhkQAJZ/KfYI4CTrX45NAV3AlOT1
CW39vYT3ODjsw97uJkoJjbXKIqHemObP2JlcIjhdffd/Vrk1Yn88KclUktSjwhBV
X5wGprktrmCKKcL1iobSv5o2r/2TZpeMHBIgdC1LogCw7L5eCBibnx8GiU+OkXJM
2aGw8GPS+hySilTde20aL6OumQfLFuZzk8TeZ5bAAjyIgIJqw/1pyn+2Hy5EnyW7
n5RpC8qG+cNLXHHYaITHX666lSRM+DMdRGVNLxK9dzdQkaFpu7w598/aeM0zRJuS
IqAVlLowY+pt3C14ax9jxvGiZ96kuYClWBeWuw4oHGfLxNqNU2xd38xzJTkUoRX7
0F16+froi85DV8UWbDswakOfs0vAoW1kLES3nnwrZ6inQ9yHANEBrXY4jZ3HwcEy
ax81fYMrpd8kD9lI0mGiX5qoanfv08jTn5UfFYNddYFcCrKRymDCVZDw6p/9JFxV
Tkry526TxELziqsfKNHt2yFKKDJ8CjtQqFSUyeo/pBCo7X87aV5B3oFgtb4lxseb
yT7o+mo452jNuL8veMPe6vz21uTwbfQfof1wk4wV8bRydGwu7ofOdeILLgtQIieM
yb+8f/XGpg1Q+Y3pTfO46a/d76KxhEHVsgqQLwMQB7p+C9PZh5Fc3VdSyjmUFyEN
Dc6i3IbfiZVgKm9Tsouf
=boPv
-----END PGP SIGNATURE-----

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "A bunch of fixes for aacraid, a set of coherency fixes that only
  affect non-coherent platforms and one coccinelle detected null check
  after use"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: libsas: align sata_device's rps_resp on a cacheline
  scsi: use dma_get_cache_alignment() as minimum DMA alignment
  scsi: dma-mapping: always provide dma_get_cache_alignment
  scsi: ufs: ufshcd: fix potential NULL pointer dereference in ufshcd_config_vreg
  scsi: aacraid: Prevent crash in case of free interrupt during scsi EH path
  scsi: aacraid: Perform initialization reset only once
  scsi: aacraid: Check for PCI state of device in a generic way
commit 13231cacce
drivers/scsi/aacraid/aacraid.h

@@ -1673,6 +1673,7 @@ struct aac_dev
 	struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
 	u8 adapter_shutdown;
 	u32 handle_pci_error;
+	bool init_reset;
 };
 
 #define aac_adapter_interrupt(dev) \
drivers/scsi/aacraid/commsup.c

@@ -467,35 +467,6 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
 	return 0;
 }
 
-#ifdef CONFIG_EEH
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-	/* Check for an EEH failure for the given
-	 * device node. Function eeh_dev_check_failure()
-	 * returns 0 if there has not been an EEH error
-	 * otherwise returns a non-zero value.
-	 *
-	 * Need to be called before any PCI operation,
-	 * i.e.,before aac_adapter_check_health()
-	 */
-	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);
-
-	if (eeh_dev_check_failure(edev)) {
-		/* The EEH mechanisms will handle this
-		 * error and reset the device if
-		 * necessary.
-		 */
-		return 1;
-	}
-	return 0;
-}
-#else
-static inline int aac_check_eeh_failure(struct aac_dev *dev)
-{
-	return 0;
-}
-#endif
-
 /*
  * Define the highest level of host to adapter communication routines.
  * These routines will support host to adapter FS commuication. These
@@ -701,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 			return -ETIMEDOUT;
 		}
 
-		if (aac_check_eeh_failure(dev))
+		if (unlikely(pci_channel_offline(dev->pdev)))
 			return -EFAULT;
 
 		if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -801,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-		if (aac_check_eeh_failure(dev))
+		if (unlikely(pci_channel_offline(dev->pdev)))
 			return -EFAULT;
 
 		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
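The commsup.c hunks above ("Check for PCI state of device in a generic way") drop the powerpc-only EEH helper and test pci_channel_offline() instead; that helper only looks at pdev->error_state, so the same guard now works on any architecture that reports PCI channel errors. A minimal sketch of the guard in an illustrative submission path follows; aac_submit_example() and its body are made up for illustration, while pci_channel_offline() is the real helper from <linux/pci.h>.

#include <linux/pci.h>
#include <linux/errno.h>

/*
 * Illustrative only: refuse to issue a command while the PCI channel is
 * in an error state.  pci_channel_offline() is true whenever
 * pdev->error_state != pci_channel_io_normal, i.e. while AER/EEH
 * recovery is in progress or the device is permanently failed.
 */
static int aac_submit_example(struct pci_dev *pdev)
{
        if (unlikely(pci_channel_offline(pdev)))
                return -EFAULT; /* same error the driver returns above */

        /* ... build and post the FIB / HBA command here ... */
        return 0;
}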
@@ -1583,6 +1554,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	 * will ensure that i/o is queisced and the card is flushed in that
 	 * case.
 	 */
+	aac_free_irq(aac);
 	aac_fib_map_free(aac);
 	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
 			  aac->comm_phys);
@@ -1590,7 +1562,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	aac->comm_phys = 0;
 	kfree(aac->queues);
 	aac->queues = NULL;
-	aac_free_irq(aac);
 	kfree(aac->fsa_dev);
 	aac->fsa_dev = NULL;
 
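The pair of _aac_reset_adapter() hunks above ("Prevent crash in case of free interrupt during scsi EH path") move aac_free_irq() ahead of the FIB and coherent-memory teardown, so an interrupt taken during SCSI error handling can no longer run the handler against memory that has already been freed. A generic sketch of the ordering rule; struct example_dev and example_teardown() are made-up stand-ins for the driver's state and reset path.

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct example_dev {                    /* hypothetical device state */
        int irq;
        struct device *dma_dev;
        size_t comm_size;
        void *comm_addr;
        dma_addr_t comm_phys;
        void *queues;
};

static void example_teardown(struct example_dev *dev)
{
        /* 1: quiesce the interrupt first, so the handler cannot run again */
        free_irq(dev->irq, dev);

        /* 2: only now release memory the handler might have touched */
        dma_free_coherent(dev->dma_dev, dev->comm_size,
                          dev->comm_addr, dev->comm_phys);
        kfree(dev->queues);
        dev->queues = NULL;
}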
drivers/scsi/aacraid/linit.c

@@ -1680,6 +1680,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	aac->cardtype = index;
 	INIT_LIST_HEAD(&aac->entry);
 
+	if (aac_reset_devices || reset_devices)
+		aac->init_reset = true;
+
 	aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
 	if (!aac->fibs)
 		goto out_free_host;
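aac_probe_one() now only latches the request: init_reset is set when the aac_reset_devices module parameter or the reset_devices boot parameter is given, and the per-controller init routines below consume and clear it, so the hardware soft reset is issued at most once rather than on every re-initialization. A condensed sketch of the one-shot-flag pattern; example_probe() and example_init() are illustrative stand-ins, not driver functions.

#include <linux/types.h>

struct example_adapter {                /* stand-in for struct aac_dev */
        bool init_reset;
};

/* Probe: remember that the user asked for a reset, but do not act yet. */
static void example_probe(struct example_adapter *adap, bool reset_requested)
{
        if (reset_requested)
                adap->init_reset = true;
}

/*
 * Init: may run again later (for example after error recovery); consume
 * the flag so the IOP soft reset happens exactly once.
 */
static int example_init(struct example_adapter *adap)
{
        if (adap->init_reset) {
                adap->init_reset = false;       /* one-shot */
                /* issue IOP_HWSOFT_RESET here */
        }
        return 0;
}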
drivers/scsi/aacraid/rx.c

@@ -561,11 +561,16 @@ int _aac_rx_init(struct aac_dev *dev)
 	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
 	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
 	dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
-	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
-			!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-		/* Make sure the Hardware FIFO is empty */
-		while ((++restart < 512) &&
-		  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+
+	if (((status & 0x0c) != 0x0c) || dev->init_reset) {
+		dev->init_reset = false;
+		if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
+			/* Make sure the Hardware FIFO is empty */
+			while ((++restart < 512) &&
+			  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+		}
+	}
+
 	/*
 	 *	Check to see if the board panic'd while booting.
 	 */
drivers/scsi/aacraid/src.c

@@ -868,9 +868,13 @@ int aac_src_init(struct aac_dev *dev)
 		/* Failure to reset here is an option ... */
 		dev->a_ops.adapter_sync_cmd = src_sync_cmd;
 		dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-	if ((aac_reset_devices || reset_devices) &&
-		!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-		++restart;
+
+	if (dev->init_reset) {
+		dev->init_reset = false;
+		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+			++restart;
+	}
+
 	/*
 	 *	Check to see if the board panic'd while booting.
 	 */
@@ -1014,9 +1018,13 @@ int aac_srcv_init(struct aac_dev *dev)
 		/* Failure to reset here is an option ... */
 		dev->a_ops.adapter_sync_cmd = src_sync_cmd;
 		dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-	if ((aac_reset_devices || reset_devices) &&
-		!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
-		++restart;
+
+	if (dev->init_reset) {
+		dev->init_reset = false;
+		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+			++restart;
+	}
+
 	/*
 	 * Check to see if flash update is running.
 	 * Wait for the adapter to be up and running. Wait up to 5 minutes
drivers/scsi/scsi_lib.c

@@ -2148,11 +2148,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 		q->limits.cluster = 0;
 
 	/*
-	 * set a reasonable default alignment on word boundaries: the
-	 * host and device may alter it using
-	 * blk_queue_update_dma_alignment() later.
+	 * Set a reasonable default alignment: The larger of 32-byte (dword),
+	 * which is a common minimum for HBAs, and the minimum DMA alignment,
+	 * which is set by the platform.
+	 *
+	 * Devices that require a bigger alignment can increase it later.
 	 */
-	blk_queue_dma_alignment(q, 0x03);
+	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
 }
 EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
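blk_queue_dma_alignment() takes a mask rather than a byte count, so the argument is "required alignment minus one". max(4, dma_get_cache_alignment()) keeps the old 4-byte minimum (mask 0x03) on cache-coherent platforms and raises the minimum to the platform's DMA cache alignment on non-coherent ones. A small userspace sketch of the arithmetic follows; the stubbed alignment values are assumptions chosen only to show the two cases.

#include <stdio.h>

/*
 * Stand-in for the kernel's dma_get_cache_alignment(); the values below
 * are assumptions picked to illustrate a coherent platform (1) and a
 * non-coherent platform with 64-byte cache lines (64).
 */
static int fake_cache_alignment(int coherent)
{
        return coherent ? 1 : 64;
}

static unsigned int dma_alignment_mask(int cache_align)
{
        int align = cache_align > 4 ? cache_align : 4;  /* max(4, ...) */
        return align - 1;       /* mask passed to blk_queue_dma_alignment() */
}

int main(void)
{
        /* prints 0x03 */
        printf("coherent:     mask = 0x%02x\n",
               dma_alignment_mask(fake_cache_alignment(1)));
        /* prints 0x3f */
        printf("non-coherent: mask = 0x%02x\n",
               dma_alignment_mask(fake_cache_alignment(0)));
        return 0;
}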
drivers/scsi/ufs/ufshcd.c

@@ -6559,12 +6559,15 @@ static int ufshcd_config_vreg(struct device *dev,
 		struct ufs_vreg *vreg, bool on)
 {
 	int ret = 0;
-	struct regulator *reg = vreg->reg;
-	const char *name = vreg->name;
+	struct regulator *reg;
+	const char *name;
 	int min_uV, uA_load;
 
 	BUG_ON(!vreg);
 
+	reg = vreg->reg;
+	name = vreg->name;
+
 	if (regulator_count_voltages(reg) > 0) {
 		min_uV = on ? vreg->min_uV : 0;
 		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
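This is the coccinelle-detected "null check after use" from the pull message: vreg->reg and vreg->name were dereferenced in the declarations, before BUG_ON(!vreg) could fire, so a NULL vreg would oops on the dereference instead of tripping the check. The fix moves the dereferences below the check. The same pattern in generic form; struct example and the function names are illustrative, not UFS code.

#include <linux/bug.h>

struct example {
        void *reg;
        const char *name;
};

/*
 * Buggy shape: e is dereferenced in the initializers, so a NULL e
 * faults before the BUG_ON() that was meant to catch it.
 */
static int config_buggy(struct example *e)
{
        void *reg = e->reg;             /* dereference ...            */
        const char *name = e->name;

        BUG_ON(!e);                     /* ... checked too late       */
        (void)reg; (void)name;
        return 0;
}

/* Fixed shape: check first, dereference afterwards. */
static int config_fixed(struct example *e)
{
        void *reg;
        const char *name;

        BUG_ON(!e);

        reg = e->reg;
        name = e->name;
        (void)reg; (void)name;
        return 0;
}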
include/linux/dma-mapping.h

@@ -704,7 +704,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-#ifdef CONFIG_HAS_DMA
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef ARCH_DMA_MINALIGN
@@ -712,7 +711,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
 	return 1;
 }
-#endif
 
 /* flags for the coherent memory api */
 #define DMA_MEMORY_EXCLUSIVE 0x01
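With the #ifdef CONFIG_HAS_DMA guard gone, dma_get_cache_alignment() is visible in every configuration, which is what lets the new call in __scsi_init_queue() above build unconditionally. Reconstructed for reference (the only line not visible in the hunks above is the ARCH_DMA_MINALIGN return, which is an assumption based on the surrounding context), the helper reduces to:

/*
 * Reports the architecture's minimum DMA alignment, or 1 when the
 * architecture defines no ARCH_DMA_MINALIGN, so callers can use it
 * without any config guards of their own.
 */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}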
include/scsi/libsas.h

@@ -159,11 +159,11 @@ struct expander_device
 
 struct sata_device {
 	unsigned int class;
-	struct smp_resp rps_resp; /* report_phy_sata_resp */
 	u8 port_no;        /* port number, if this is a PM (Port) */
 
 	struct ata_port *ap;
 	struct ata_host ata_host;
+	struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
 	u8 fis[ATA_RESP_FIS_SIZE];
 };
 
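rps_resp is a buffer that the REPORT PHY SATA response is DMAed into. On a non-coherent platform the streaming DMA API invalidates or writes back whole cache lines around the transfer, so if that buffer shares a line with fields the CPU is updating, either the CPU's writes or the DMAed data can be lost; ____cacheline_aligned gives the member its own line. A reduced sketch of the anti-pattern and the cure; example_dev_bad/example_dev_good and resp_buf are illustrative names, not libsas types.

#include <linux/cache.h>
#include <linux/types.h>

#define RESP_LEN 32     /* illustrative size of the DMAed response */

/* Anti-pattern: the DMA target shares cache lines with CPU-owned state. */
struct example_dev_bad {
        unsigned int class;
        u8 resp_buf[RESP_LEN];  /* DMAed into by the device          */
        u8 port_no;             /* CPU-written, may share the same line */
};

/*
 * Fix: start the DMA target on its own cache line, as libsas now does
 * with 'struct smp_resp rps_resp ____cacheline_aligned'.
 */
struct example_dev_good {
        unsigned int class;
        u8 port_no;
        u8 resp_buf[RESP_LEN] ____cacheline_aligned;
};

Aligning the member's start is what stops the DMA target from sharing lines with the fields declared before it; as a side effect the structure's overall alignment is raised to a cache line.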