for-5.5/libata-20191121
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl3WxTEQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppkrD/wI7A0K+CE6Cd0QQtVmZaMG6r68rj4wUtyB
J4j44DOzfLwhRoQ4VhqbAysUiVHc2sT/10Ogq0vFmi5y2ewNQWHQ7tCCv52zbh0X
bJdt67GkM81MNpX1Hjn/ABcSAy8nzQ+efPmbS/6SnEZvSh9HIajwE6E7CxPaQ6NT
mZtLsPTvnn1Ct01JZecnFYSRWvjnKDd89B2KjU1FoUcOjZxCJiTXB1XQWlFAI2rH
qnzTXYN6CewnENLlkUCdfFCIsaRh4q2gN7plSVL35VPYTWOYNSmgJT5glRZ7ZyHi
fYPsAGvvYBRdqF/goOklqTRtmFNhwbnjW5pspcwQMyAlHGCpGRemxmQ8OA7qtjSk
XrZrrPO5TZNs/c7vR5697anAa3fD5YefaRvzBJR9IpWS1rDOTg9P9y3isWOclQQz
llVSZE4yW7qEBp5x9uvOR2jCdqx9HNaXKB3u+Qc9etYGdsAKROgeW7YbLkafhnQL
ak0BM/BhHJYuJ/GIAPgQyB/D982G3I4vvUSB5Ixf3w6v4MDowUDznJNHdvhWSoe2
eU1gb0tfVUSdCcHMcAYKtFJikGgD19rjESKQW8UJM1aUOyJkawdzTx4grMdzOfzB
3vue4QKT3sPHeSCLQGLZLCKBN8sHRtKSPGaJKwXdvyZYFc4YZ8eFhg7ElICFJFRZ
SS361Hviog==
=ucL7
-----END PGP SIGNATURE-----

Merge tag 'for-5.5/libata-20191121' of git://git.kernel.dk/linux-block

Pull libata updates from Jens Axboe:
 "Just a few fixes all over the place, support for the Annapurna SATA
  controller, and a patchset that cleans up the error defines and
  ultimately fixes an issue with sata_mv"

* tag 'for-5.5/libata-20191121' of git://git.kernel.dk/linux-block:
  ata: pata_artop: make arrays static const, makes object smaller
  ata_piix: remove open-coded dmi_match(DMI_OEM_STRING)
  ata: sata_mv, avoid trigerrable BUG_ON
  ata: make qc_prep return ata_completion_errors
  ata: define AC_ERR_OK
  ata: Documentation, fix function names
  libata: Ensure ata_port probe has completed before detach
  ahci: tegra: use regulator_bulk_set_supply_names()
  ahci: Add support for Amazon's Annapurna Labs SATA controller
commit 6e7b06a4c8
@@ -250,23 +250,23 @@ High-level taskfile hooks
 
 ::
 
-	void (*qc_prep) (struct ata_queued_cmd *qc);
+	enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
 	int (*qc_issue) (struct ata_queued_cmd *qc);
 
 
-Higher-level hooks, these two hooks can potentially supercede several of
+Higher-level hooks, these two hooks can potentially supersede several of
 the above taskfile/DMA engine hooks. ``->qc_prep`` is called after the
 buffers have been DMA-mapped, and is typically used to populate the
-hardware's DMA scatter-gather table. Most drivers use the standard
-:c:func:`ata_qc_prep` helper function, but more advanced drivers roll their
-own.
+hardware's DMA scatter-gather table. Some drivers use the standard
+:c:func:`ata_bmdma_qc_prep` and :c:func:`ata_bmdma_dumb_qc_prep` helper
+functions, but more advanced drivers roll their own.
 
 ``->qc_issue`` is used to make a command active, once the hardware and S/G
 tables have been prepared. IDE BMDMA drivers use the helper function
-:c:func:`ata_qc_issue_prot` for taskfile protocol-based dispatch. More
+:c:func:`ata_sff_qc_issue` for taskfile protocol-based dispatch. More
 advanced drivers implement their own ``->qc_issue``.
 
-:c:func:`ata_qc_issue_prot` calls ``->tf_load()``, ``->bmdma_setup()``, and
+:c:func:`ata_sff_qc_issue` calls ``->sff_tf_load()``, ``->bmdma_setup()``, and
 ``->bmdma_start()`` as necessary to initiate a transfer.
 
 Exception and probe handling (EH)
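The documentation hunk above describes the ``->qc_prep``/``->qc_issue`` hooks whose prototypes this pull changes. As a rough, standalone C model of the new contract (the fake_* names are invented for this sketch and are not libata code), a prep hook now reports success or failure through its return value, and because AC_ERR_OK is 0 the caller can simply OR the result into its error mask:

#include <stdio.h>

/* Standalone model of the reworked hook contract; it mirrors the kernel's
 * naming but is only an illustration, not the libata implementation. */
enum ata_completion_errors {
	AC_ERR_OK      = 0,        /* no error */
	AC_ERR_INVALID = (1 << 8), /* invalid argument */
};

struct fake_qc {
	int dma_mapped;        /* stands in for ATA_QCFLAG_DMAMAP */
	unsigned int err_mask; /* accumulated completion errors */
};

/* A driver-side prep hook under the new convention: early exits return
 * AC_ERR_OK, and an unsupported request would return a real error code
 * instead of triggering a BUG_ON inside the driver. */
static enum ata_completion_errors fake_qc_prep(struct fake_qc *qc)
{
	if (!qc->dma_mapped)
		return AC_ERR_OK;	/* nothing to prepare, but not an error */

	/* ... a real driver would fill its scatter-gather table here ... */
	return AC_ERR_OK;
}

int main(void)
{
	struct fake_qc qc = { .dma_mapped = 1, .err_mask = 0 };

	/* The caller folds the result into err_mask; AC_ERR_OK == 0 leaves
	 * the success path unchanged. */
	qc.err_mask |= fake_qc_prep(&qc);
	printf("err_mask after prep: 0x%x\n", qc.err_mask);
	return 0;
}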
@@ -56,7 +56,7 @@ struct acard_sg {
 	__le32			size;	/* bit 31 (EOT) max==0x10000 (64k) */
 };
 
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int acard_ahci_port_start(struct ata_port *ap);
 static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 	return si;
 }
 
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
@@ -248,6 +248,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+	return AC_ERR_OK;
 }
 
 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
@@ -56,6 +56,7 @@ enum board_ids {
 	board_ahci_yes_fbs,
 
 	/* board IDs for specific chipsets in alphabetical order */
+	board_ahci_al,
 	board_ahci_avn,
 	board_ahci_mcp65,
 	board_ahci_mcp77,
@@ -167,6 +168,13 @@ static const struct ata_port_info ahci_port_info[] = {
 		.port_ops	= &ahci_ops,
 	},
 	/* by chipsets */
+	[board_ahci_al] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
 	[board_ahci_avn] = {
 		.flags		= AHCI_FLAG_COMMON,
 		.pio_mask	= ATA_PIO4,
@@ -415,6 +423,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
 
+	/* Amazon's Annapurna Labs support */
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031),
+		.class = PCI_CLASS_STORAGE_SATA_AHCI,
+		.class_mask = 0xffffff,
+		board_ahci_al },
 	/* AMD */
 	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
 	{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
@@ -483,7 +483,6 @@ static int tegra_ahci_probe(struct platform_device *pdev)
 	struct tegra_ahci_priv *tegra;
 	struct resource *res;
 	int ret;
-	unsigned int i;
 
 	hpriv = ahci_platform_get_resources(pdev, 0);
 	if (IS_ERR(hpriv))
@@ -543,8 +542,9 @@ static int tegra_ahci_probe(struct platform_device *pdev)
 	if (!tegra->supplies)
 		return -ENOMEM;
 
-	for (i = 0; i < tegra->soc->num_supplies; i++)
-		tegra->supplies[i].supply = tegra->soc->supply_names[i];
+	regulator_bulk_set_supply_names(tegra->supplies,
+					tegra->soc->supply_names,
+					tegra->soc->num_supplies);
 
 	ret = devm_regulator_bulk_get(&pdev->dev,
 				      tegra->soc->num_supplies,
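The ahci_tegra hunks above drop an open-coded loop in favour of the regulator_bulk_set_supply_names() helper. A minimal standalone sketch of what such a helper is assumed to do (simplified struct and names, not the kernel implementation):

#include <stdio.h>

/* Simplified stand-in for struct regulator_bulk_data; the real structure
 * carries more fields than just the supply name. */
struct bulk_data {
	const char *supply;
};

/* Model of regulator_bulk_set_supply_names(): copy each supply name into
 * the consumer array, which is what the removed per-driver loop in
 * tegra_ahci_probe() used to do by hand. */
static void bulk_set_supply_names(struct bulk_data *consumers,
				  const char *const *supply_names,
				  unsigned int num_supplies)
{
	unsigned int i;

	for (i = 0; i < num_supplies; i++)
		consumers[i].supply = supply_names[i];
}

int main(void)
{
	/* Example supply names, not taken from the Tegra SoC data. */
	static const char *const names[] = { "vdd-a", "vdd-b", "vdd-c" };
	struct bulk_data supplies[3];

	bulk_set_supply_names(supplies, names, 3);
	printf("first supply: %s\n", supplies[0].supply);
	return 0;
}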
@@ -840,6 +840,12 @@ static int piix_broken_suspend(void)
 			DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
 		},
 	},
+	{
+		.ident = "TECRA M3",
+		.matches = {
+			DMI_MATCH(DMI_OEM_STRING, "Tecra M3,"),
+		},
+	},
 	{
 		.ident = "TECRA M4",
 		.matches = {
@@ -955,18 +961,10 @@ static int piix_broken_suspend(void)
 
 		{ }	/* terminate list */
 	};
-	static const char *oemstrs[] = {
-		"Tecra M3,",
-	};
-	int i;
 
 	if (dmi_check_system(sysids))
 		return 1;
 
-	for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
-		if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
-			return 1;
-
 	/* TECRA M4 sometimes forgets its identify and reports bogus
 	 * DMI information. As the bogus information is a bit
 	 * generic, match as many entries as possible. This manual
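The ata_piix hunks above fold the OEM-string scan into the DMI quirk table, so one dmi_check_system() pass covers both product-name and OEM-string quirks. A small standalone model of that table-driven matching (the fake_dmi_* names are invented for illustration; the real code uses struct dmi_system_id, DMI_MATCH() and dmi_check_system()):

#include <stdio.h>
#include <string.h>

/* Invented stand-in for a DMI quirk table entry keyed on an OEM string. */
struct fake_dmi_id {
	const char *ident;      /* human-readable name of the quirk entry */
	const char *oem_string; /* OEM string that marks the affected machine */
};

/* Pretend firmware-provided OEM strings for the running machine. */
static const char *fake_machine_oem_strings[] = { "Tecra M3,", NULL };

/* Model of a table-driven check: return 1 as soon as any table entry's
 * OEM string is present on the machine. */
static int fake_dmi_check(const struct fake_dmi_id *ids)
{
	const struct fake_dmi_id *id;
	const char **s;

	for (id = ids; id->ident; id++)
		for (s = fake_machine_oem_strings; *s; s++)
			if (strcmp(*s, id->oem_string) == 0)
				return 1;
	return 0;
}

int main(void)
{
	static const struct fake_dmi_id sysids[] = {
		{ "TECRA M3", "Tecra M3," },
		{ NULL, NULL }	/* terminate list */
	};

	printf("broken-suspend quirk applies: %d\n", fake_dmi_check(sysids));
	return 0;
}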
@@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
@@ -1624,7 +1624,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
 	return sata_pmp_qc_defer_cmd_switch(qc);
 }
 
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
@@ -1660,6 +1660,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+	return AC_ERR_OK;
 }
 
 static void ahci_fbs_dec_intr(struct ata_port *ap)
@@ -4980,7 +4980,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
 	return ATA_DEFER_LINK;
 }
 
-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
+{
+	return AC_ERR_OK;
+}
 
 /**
  *	ata_sg_init - Associate command with scatter-gather table.
@@ -5443,7 +5446,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 		return;
 	}
 
-	ap->ops->qc_prep(qc);
+	qc->err_mask |= ap->ops->qc_prep(qc);
+	if (unlikely(qc->err_mask))
+		goto err;
 	trace_ata_qc_issue(qc);
 	qc->err_mask |= ap->ops->qc_issue(qc);
 	if (unlikely(qc->err_mask))
@@ -6708,6 +6713,9 @@ void ata_host_detach(struct ata_host *host)
 {
 	int i;
 
+	/* Ensure ata_port probe has completed */
+	async_synchronize_full();
+
 	for (i = 0; i < host->n_ports; i++)
 		ata_port_detach(host->ports[i]);
 
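The libata-core hunks above are where the new return type changes behaviour: ata_qc_issue() now folds the ->qc_prep() result into qc->err_mask and takes the error path before the command is ever issued, which is what lets sata_mv report AC_ERR_INVALID instead of hitting a BUG_ON. A standalone sketch of that control flow (invented names, not kernel code):

#include <stdio.h>

/* Standalone model of the new issue-path control flow. */
enum completion_errors {
	ERR_OK      = 0,
	ERR_INVALID = (1 << 8),
};

struct cmd {
	unsigned int err_mask;
};

/* Stand-in for a driver ->qc_prep() hook. */
static enum completion_errors prep(int supported)
{
	return supported ? ERR_OK : ERR_INVALID;
}

static void issue(struct cmd *c, int supported)
{
	/* Fold the prep result into err_mask; a non-zero value short-circuits
	 * to the error path, modelling the 'goto err' added to ata_qc_issue(). */
	c->err_mask |= prep(supported);
	if (c->err_mask) {
		printf("prep failed (0x%x), completing with error\n", c->err_mask);
		return;
	}
	printf("command issued\n");
}

int main(void)
{
	struct cmd ok = { 0 };
	struct cmd bad = { 0 };

	issue(&ok, 1);
	issue(&bad, 0);
	return 0;
}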
@@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	ata_bmdma_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
 
@@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	ata_bmdma_fill_sg_dumb(qc);
+
+	return AC_ERR_OK;
 }
 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
 
@@ -100,7 +100,7 @@ static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev,
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	int dn = adev->devno + 2 * ap->port_no;
-	const u16 timing[2][5] = {
+	static const u16 timing[2][5] = {
 		{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
 		{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
 
@@ -154,7 +154,7 @@ static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev
 {
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	int dn = adev->devno + 2 * ap->port_no;
-	const u8 timing[2][5] = {
+	static const u8 timing[2][5] = {
 		{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
 		{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
 
@@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
 	return ATA_CBL_PATA40;
 }
 
-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 {
 	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 	struct ata_port *ap = qc->ap;
@@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 		      __func__, qc, qc->flags, write, qc->dev->devno);
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 
@@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 	table->command = cpu_to_le16(DBDMA_STOP);
 
 	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+
+	return AC_ERR_OK;
 }
 
 
@@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d)
 /*
  * Prepare taskfile for submission.
  */
-static void pxa_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pata_pxa_data *pd = qc->ap->private_data;
 	struct dma_async_tx_descriptor *tx;
 	enum dma_transfer_direction dir;
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
 	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
 				     DMA_PREP_INTERRUPT);
 	if (!tx) {
 		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
-		return;
+		return AC_ERR_OK;
 	}
 	tx->callback = pxa_ata_dma_irq;
 	tx->callback_param = pd;
 	pd->dma_cookie = dmaengine_submit(tx);
+
+	return AC_ERR_OK;
 }
 
 /*
@@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
 			    const struct pci_device_id *ent);
 static int adma_port_start(struct ata_port *ap);
 static void adma_port_stop(struct ata_port *ap);
-static void adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
 static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
 static void adma_freeze(struct ata_port *ap);
@@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
 	return i;
 }
 
-static void adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct adma_port_priv *pp = qc->ap->private_data;
 	u8 *buf = pp->pkt;
@@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 
 	adma_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA)
-		return;
+		return AC_ERR_OK;
 
 	buf[i++] = 0;	/* Response flags */
 	buf[i++] = 0;	/* reserved */
@@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 		printk("%s\n", obuf);
 	}
 #endif
+	return AC_ERR_OK;
 }
 
 static inline void adma_packet_start(struct ata_queued_cmd *qc)
@@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
 	return num_prde;
 }
 
-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct sata_fsl_port_priv *pp = ap->private_data;
@@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
 
 	VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
 		desc_info, ttl_dwords, num_prde);
+
+	return AC_ERR_OK;
 }
 
 static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
@@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
 	prd[-1].flags |= PRD_END;
 }
 
-static void inic_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct inic_port_priv *pp = qc->ap->private_data;
 	struct inic_pkt *pkt = pp->pkt;
@@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
 		inic_fill_sg(prd, qc);
 
 	pp->cpb_tbl[0] = pp->pkt_dma;
+
+	return AC_ERR_OK;
 }
 
 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
@@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static int mv_qc_defer(struct ata_queued_cmd *qc);
-static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 static int mv_hardreset(struct ata_link *link, unsigned int *class,
 			unsigned long deadline);
@@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	Inherited from caller.
  */
-static void mv_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct mv_port_priv *pp = ap->private_data;
@@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	switch (tf->protocol) {
 	case ATA_PROT_DMA:
 		if (tf->command == ATA_CMD_DSM)
-			return;
+			return AC_ERR_OK;
 		/* fall-thru */
 	case ATA_PROT_NCQ:
 		break;	/* continue below */
 	case ATA_PROT_PIO:
 		mv_rw_multi_errata_sata24(qc);
-		return;
+		return AC_ERR_OK;
 	default:
-		return;
+		return AC_ERR_OK;
 	}
 
 	/* Fill in command request block
@@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
 		 * of which are defined/used by Linux. If we get here, this
 		 * driver needs work.
-		 *
-		 * FIXME: modify libata to give qc_prep a return value and
-		 * return error here.
 		 */
-		BUG_ON(tf->command);
-		break;
+		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
+				tf->command);
+		return AC_ERR_INVALID;
 	}
 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
@@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 	mv_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 /**
@@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	Inherited from caller.
  */
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct mv_port_priv *pp = ap->private_data;
@@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 
 	if ((tf->protocol != ATA_PROT_DMA) &&
 	    (tf->protocol != ATA_PROT_NCQ))
-		return;
+		return AC_ERR_OK;
 	if (tf->command == ATA_CMD_DSM)
-		return;	/* use bmdma for this */
+		return AC_ERR_OK;	/* use bmdma for this */
 
 	/* Fill in Gen IIE command request block */
 	if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 		);
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 	mv_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 /**
@@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
 static int nv_adma_slave_config(struct scsi_device *sdev);
 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 static void nv_adma_irq_clear(struct ata_port *ap);
@@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
 static void nv_swncq_error_handler(struct ata_port *ap);
 static int nv_swncq_slave_config(struct scsi_device *sdev);
 static int nv_swncq_port_start(struct ata_port *ap);
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
@@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
 	return 1;
 }
 
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
@@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
 		ata_bmdma_qc_prep(qc);
-		return;
+		return AC_ERR_OK;
 	}
 
 	cpb->resp_flags = NV_CPB_RESP_DONE;
@@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 	cpb->ctl_flags = ctl_flags;
 	wmb();
 	cpb->resp_flags = 0;
+
+	return AC_ERR_OK;
 }
 
 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
@@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
 	return 0;
 }
 
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (qc->tf.protocol != ATA_PROT_NCQ) {
 		ata_bmdma_qc_prep(qc);
-		return;
+		return AC_ERR_OK;
 	}
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	nv_swncq_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
@@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
 static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int pdc_common_port_start(struct ata_port *ap);
 static int pdc_sata_port_start(struct ata_port *ap);
-static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
-static void pdc_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pdc_port_priv *pp = qc->ap->private_data;
 	unsigned int i;
@@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 	default:
 		break;
 	}
+
+	return AC_ERR_OK;
 }
 
 static int pdc_is_sataii_tx4(unsigned long flags)
@@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int qs_port_start(struct ata_port *ap);
 static void qs_host_stop(struct ata_host *host);
-static void qs_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
 static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
 static void qs_freeze(struct ata_port *ap);
@@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
 	return si;
 }
 
-static void qs_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct qs_port_priv *pp = qc->ap->private_data;
 	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
@@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 
 	qs_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA)
-		return;
+		return AC_ERR_OK;
 
 	nelem = qs_fill_sg(qc);
 
@@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 
 	/* frame information structure (FIS) */
 	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+
+	return AC_ERR_OK;
 }
 
 static inline void qs_packet_start(struct ata_queued_cmd *qc)
@@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
 	prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
 }
 
-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	sata_rcar_bmdma_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
@@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev);
 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
-static void sil_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
 static void sil_bmdma_setup(struct ata_queued_cmd *qc);
 static void sil_bmdma_start(struct ata_queued_cmd *qc);
 static void sil_bmdma_stop(struct ata_queued_cmd *qc);
@@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
 		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
-static void sil_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	sil_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
@@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev);
 static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
 static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
 static int sil24_qc_defer(struct ata_queued_cmd *qc);
-static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
 static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
 static void sil24_pmp_attach(struct ata_port *ap);
@@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
 	return ata_std_qc_defer(qc);
 }
 
-static void sil24_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct sil24_port_priv *pp = ap->private_data;
@@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
 		sil24_fill_sg(qc, sge);
+
+	return AC_ERR_OK;
 }
 
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
@@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap);
 static void pdc_freeze(struct ata_port *ap);
 static void pdc_thaw(struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static unsigned int pdc20621_dimm_init(struct ata_host *host);
@@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
 }
 
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
 {
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
@@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
 	default:
 		break;
 	}
+
+	return AC_ERR_OK;
 }
 
 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
@@ -484,6 +484,7 @@ enum hsm_task_states {
 };
 
 enum ata_completion_errors {
+	AC_ERR_OK		= 0,	    /* no error */
 	AC_ERR_DEV		= (1 << 0), /* device reported error */
 	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
 	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
@@ -891,9 +892,9 @@ struct ata_port_operations {
 	/*
 	 * Command execution
 	 */
-	int  (*qc_defer)(struct ata_queued_cmd *qc);
-	int  (*check_atapi_dma)(struct ata_queued_cmd *qc);
-	void (*qc_prep)(struct ata_queued_cmd *qc);
+	int (*qc_defer)(struct ata_queued_cmd *qc);
+	int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+	enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
 	unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
 	bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
 
@@ -1161,7 +1162,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
 extern const char *ata_mode_string(unsigned long xfer_mask);
 extern unsigned long ata_id_xfermask(const u16 *id);
 extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 			unsigned int n_elem);
 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1893,9 +1894,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
 	.sg_tablesize		= LIBATA_MAX_PRD,		\
 	.dma_boundary		= ATA_DMA_BOUNDARY
 
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
 extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
 				      struct ata_queued_cmd *qc);
 extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);