mirror of https://gitee.com/openkylin/linux.git
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev: (49 commits)
  libata-sff: separate out BMDMA qc_issue
  libata-sff: prd is BMDMA specific
  libata-sff: ata_sff_[dumb_]qc_prep are BMDMA specific
  libata-sff: separate out BMDMA EH
  libata-sff: port_task is SFF specific
  libata-sff: ap->[last_]ctl are SFF specific
  libata-sff: rename ap->ops->drain_fifo() to sff_drain_fifo()
  libata-sff: introduce ata_sff_init/exit() and ata_sff_port_init()
  libata-sff: clean up BMDMA initialization
  libata-sff: clean up inheritance in several drivers
  libata-sff: reorder SFF/BMDMA functions
  sata_inic162x: kill PORT_PRD_ADDR initialization
  libata: kill ATA_FLAG_DISABLED
  libata-sff: kill unused prototype and make ata_dev_select() static
  libata-sff: update bmdma host bus error handling
  sata_mv: remove unnecessary initialization
  sata_inic162x: inic162x is not dependent on CONFIG_ATA_SFF
  pata_sch: use ata_pci_sff_init_one()
  pata_sil680: Do our own exec_command posting
  libata: Remove excess delay in the tf_load path
  ...
commit bd7fc2f2d8
@@ -81,16 +81,14 @@ void (*port_disable) (struct ata_port *);
</programlisting>

<para>
Called from ata_bus_probe() and ata_bus_reset() error paths,
as well as when unregistering from the SCSI module (rmmod, hot
unplug).
Called from ata_bus_probe() error path, as well as when
unregistering from the SCSI module (rmmod, hot unplug).
This function should do whatever needs to be done to take the
port out of use. In most cases, ata_port_disable() can be used
as this hook.
</para>

<para>
Called from ata_bus_probe() on a failed probe.
Called from ata_bus_reset() on a failed bus reset.
Called from ata_scsi_release().
</para>
@@ -227,6 +225,18 @@ u8 (*sff_check_altstatus)(struct ata_port *ap);
</sect2>

<sect2><title>Write specific ATA shadow register</title>
<programlisting>
void (*sff_set_devctl)(struct ata_port *ap, u8 ctl);
</programlisting>

<para>
Write the device control ATA shadow register to the hardware.
Most drivers don't need to define this.
</para>

</sect2>

<sect2><title>Select ATA device on bus</title>
<programlisting>
void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
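To make the new hook concrete, here is a minimal sketch of how a driver might implement ->sff_set_devctl for a memory-mapped Device Control register. It is not taken from any driver in this merge; the MMIO write and the example_ names are assumptions for illustration only.

        /* Hedged sketch: driver-supplied sff_set_devctl() for an MMIO taskfile.
         * Assumes ap->ioaddr.ctl_addr was mapped in the driver's probe routine;
         * drivers whose Device Control register is reachable the normal way do
         * not need to provide this hook at all.
         */
        static void example_set_devctl(struct ata_port *ap, u8 ctl)
        {
                iowrite8(ctl, ap->ioaddr.ctl_addr);
        }

        static struct ata_port_operations example_port_ops = {
                .inherits       = &ata_sff_port_ops,
                .sff_set_devctl = example_set_devctl,   /* hypothetical hook */
        };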
@@ -65,6 +65,14 @@ config SATA_AHCI

If unsure, say N.

config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
help
This option enables support for Platform AHCI Serial ATA
controllers.

If unsure, say N.

config SATA_SIL24
tristate "Silicon Image 3124/3132 SATA support"
depends on PCI

@@ -73,6 +81,12 @@ config SATA_SIL24

If unsure, say N.

config SATA_INIC162X
tristate "Initio 162x SATA support"
depends on PCI
help
This option enables support for Initio 162x Serial ATA.

config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC

@@ -213,12 +227,6 @@ config SATA_VITESSE

If unsure, say N.

config SATA_INIC162X
tristate "Initio 162x SATA support"
depends on PCI
help
This option enables support for Initio 162x Serial ATA.

config PATA_ACPI
tristate "ACPI firmware driver for PATA"
depends on ATA_ACPI
@@ -1,7 +1,8 @@
obj-$(CONFIG_ATA) += libata.o

obj-$(CONFIG_SATA_AHCI) += ahci.o
obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
obj-$(CONFIG_SATA_SVW) += sata_svw.o
obj-$(CONFIG_ATA_PIIX) += ata_piix.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
drivers/ata/ahci.c (2574 changes): diff suppressed because it is too large.
@@ -0,0 +1,343 @@
/*
* ahci.h - Common AHCI SATA definitions and declarations
*
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
* Copyright 2004-2005 Red Hat, Inc.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* AHCI hardware documentation:
* http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
* http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
*
*/

#ifndef _AHCI_H
#define _AHCI_H

#include <linux/libata.h>

/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE 0x000f0000

/* Enclosure Management LED Message Type */
#define EM_MSG_LED_HBA_PORT 0x0000000f
#define EM_MSG_LED_PMP_SLOT 0x0000ff00
#define EM_MSG_LED_VALUE 0xffff0000
#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
#define EM_MSG_LED_VALUE_OFF 0xfff80000
#define EM_MSG_LED_VALUE_ON 0x00010000

enum {
AHCI_MAX_PORTS = 32,
AHCI_MAX_SG = 168, /* hardware max is 64K */
AHCI_DMA_BOUNDARY = 0xffffffff,
AHCI_MAX_CMDS = 32,
AHCI_CMD_SZ = 32,
AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
AHCI_RX_FIS_SZ = 256,
AHCI_CMD_TBL_CDB = 0x40,
AHCI_CMD_TBL_HDR_SZ = 0x80,
AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
AHCI_RX_FIS_SZ,
AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
AHCI_CMD_TBL_AR_SZ +
(AHCI_RX_FIS_SZ * 16),
AHCI_IRQ_ON_SG = (1 << 31),
AHCI_CMD_ATAPI = (1 << 5),
AHCI_CMD_WRITE = (1 << 6),
AHCI_CMD_PREFETCH = (1 << 7),
AHCI_CMD_RESET = (1 << 8),
AHCI_CMD_CLR_BUSY = (1 << 10),

RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */

/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
HOST_CTL = 0x04, /* global host control */
HOST_IRQ_STAT = 0x08, /* interrupt status */
HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
HOST_EM_LOC = 0x1c, /* Enclosure Management location */
HOST_EM_CTL = 0x20, /* Enclosure Management Control */
HOST_CAP2 = 0x24, /* host capabilities, extended */

/* HOST_CTL bits */
HOST_RESET = (1 << 0), /* reset controller; self-clear */
HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
HOST_AHCI_EN = (1 << 31), /* AHCI enabled */

/* HOST_CAP bits */
HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
HOST_CAP_PART = (1 << 13), /* Partial state capable */
HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
HOST_CAP_LED = (1 << 25), /* Supports activity LED */
HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
HOST_CAP_SNTF = (1 << 29), /* SNotification register */
HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */

/* HOST_CAP2 bits */
HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */

/* registers for each SATA port */
PORT_LST_ADDR = 0x00, /* command list DMA addr */
PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
PORT_IRQ_STAT = 0x10, /* interrupt status */
PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
PORT_CMD = 0x18, /* port command */
PORT_TFDATA = 0x20, /* taskfile data */
PORT_SIG = 0x24, /* device TF signature */
PORT_CMD_ISSUE = 0x38, /* command issue */
PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
PORT_FBS = 0x40, /* FIS-based Switching */

/* PORT_IRQ_{STAT,MASK} bits */
PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */

PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */

PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
PORT_IRQ_IF_ERR |
PORT_IRQ_CONNECT |
PORT_IRQ_PHYRDY |
PORT_IRQ_UNK_FIS |
PORT_IRQ_BAD_PMP,
PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
PORT_IRQ_TF_ERR |
PORT_IRQ_HBUS_DATA_ERR,
DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

/* PORT_CMD bits */
PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
PORT_CMD_PMP = (1 << 17), /* PMP attached */
PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
PORT_CMD_CLO = (1 << 3), /* Command list override */
PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
PORT_CMD_START = (1 << 0), /* Enable port DMA engine */

PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */

PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
PORT_FBS_SDE = (1 << 2), /* FBS single device error */
PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
PORT_FBS_EN = (1 << 0), /* Enable FBS */

/* hpriv->flags bits */
AHCI_HFLAG_NO_NCQ = (1 << 0),
AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
link offline */
AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */

/* ap->flags bits */

AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
ATA_FLAG_IPM,

ICH_MAP = 0x90, /* ICH MAP register */

/* em constants */
EM_MAX_SLOTS = 8,
EM_MAX_RETRY = 5,

/* em_ctl bits */
EM_CTL_RST = (1 << 9), /* Reset */
EM_CTL_TM = (1 << 8), /* Transmit Message */
EM_CTL_MR = (1 << 0), /* Message Recieved */
EM_CTL_ALHD = (1 << 26), /* Activity LED */
EM_CTL_XMT = (1 << 25), /* Transmit Only */
EM_CTL_SMB = (1 << 24), /* Single Message Buffer */

/* em message type */
EM_MSG_TYPE_LED = (1 << 0), /* LED */
EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
};

struct ahci_cmd_hdr {
__le32 opts;
__le32 status;
__le32 tbl_addr;
__le32 tbl_addr_hi;
__le32 reserved[4];
};

struct ahci_sg {
__le32 addr;
__le32 addr_hi;
__le32 reserved;
__le32 flags_size;
};

struct ahci_em_priv {
enum sw_activity blink_policy;
struct timer_list timer;
unsigned long saved_activity;
unsigned long activity;
unsigned long led_state;
};

struct ahci_port_priv {
struct ata_link *active_link;
struct ahci_cmd_hdr *cmd_slot;
dma_addr_t cmd_slot_dma;
void *cmd_tbl;
dma_addr_t cmd_tbl_dma;
void *rx_fis;
dma_addr_t rx_fis_dma;
/* for NCQ spurious interrupt analysis */
unsigned int ncq_saw_d2h:1;
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
u32 intr_mask; /* interrupts to enable */
bool fbs_supported; /* set iff FBS is supported */
bool fbs_enabled; /* set iff FBS is enabled */
int fbs_last_dev; /* save FBS.DEV of last FIS */
/* enclosure management info per PM slot */
struct ahci_em_priv em_priv[EM_MAX_SLOTS];
};

struct ahci_host_priv {
void __iomem * mmio; /* bus-independant mem map */
unsigned int flags; /* AHCI_HFLAG_* */
u32 cap; /* cap to use */
u32 cap2; /* cap2 to use */
u32 port_map; /* port map to use */
u32 saved_cap; /* saved initial cap */
u32 saved_cap2; /* saved initial cap2 */
u32 saved_port_map; /* saved initial port_map */
u32 em_loc; /* enclosure management location */
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
};

extern int ahci_ignore_sss;

extern struct scsi_host_template ahci_sht;
extern struct ata_port_operations ahci_ops;

void ahci_save_initial_config(struct device *dev,
struct ahci_host_priv *hpriv,
unsigned int force_port_map,
unsigned int mask_port_map);
void ahci_init_controller(struct ata_host *host);
int ahci_reset_controller(struct ata_host *host);

int ahci_do_softreset(struct ata_link *link, unsigned int *class,
int pmp, unsigned long deadline,
int (*check_ready)(struct ata_link *link));

int ahci_stop_engine(struct ata_port *ap);
void ahci_start_engine(struct ata_port *ap);
int ahci_check_ready(struct ata_link *link);
int ahci_kick_engine(struct ata_port *ap);
void ahci_set_em_messages(struct ahci_host_priv *hpriv,
struct ata_port_info *pi);
int ahci_reset_em(struct ata_host *host);
irqreturn_t ahci_interrupt(int irq, void *dev_instance);
void ahci_print_info(struct ata_host *host, const char *scc_s);

static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;

return mmio + 0x100 + (port_no * 0x80);
}

static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
return __ahci_port_base(ap->host, ap->port_no);
}

static inline int ahci_nr_ports(u32 cap)
{
return (cap & 0x1f) + 1;
}

#endif /* _AHCI_H */
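As a rough illustration of how the port helpers and register offsets above are meant to combine, here is a short sketch; it is not code from the patch, and the example_ name is made up.

        /* Hedged sketch: read a port's SATA status (PxSSTS) through the helpers
         * declared above.  Assumes 'ap' belongs to an AHCI host whose
         * host->private_data points at a struct ahci_host_priv.
         */
        static u32 example_read_sstatus(struct ata_port *ap)
        {
                void __iomem *port_mmio = ahci_port_base(ap); /* 0x100 + port_no * 0x80 */

                return readl(port_mmio + PORT_SCR_STAT);
        }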
@@ -0,0 +1,192 @@
/*
* AHCI SATA platform driver
*
* Copyright 2004-2005 Red Hat, Inc.
* Jeff Garzik <jgarzik@pobox.com>
* Copyright 2010 MontaVista Software, LLC.
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*/

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
#include "ahci.h"

static int __init ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev->platform_data;
struct ata_port_info pi = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
};
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
struct ata_host *host;
struct resource *mem;
int irq;
int n_ports;
int i;
int rc;

mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(dev, "no mmio space\n");
return -EINVAL;
}

irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "no irq\n");
return -EINVAL;
}

if (pdata && pdata->init) {
rc = pdata->init(dev);
if (rc)
return rc;
}

if (pdata && pdata->ata_port_info)
pi = *pdata->ata_port_info;

hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv) {
rc = -ENOMEM;
goto err0;
}

hpriv->flags |= (unsigned long)pi.private_data;

hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
if (!hpriv->mmio) {
dev_err(dev, "can't map %pR\n", mem);
rc = -ENOMEM;
goto err0;
}

ahci_save_initial_config(dev, hpriv,
pdata ? pdata->force_port_map : 0,
pdata ? pdata->mask_port_map : 0);

/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ;

if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;

ahci_set_em_messages(hpriv, &pi);

/* CAP.NP sometimes indicate the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
* both CAP.NP and port_map.
*/
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

host = ata_host_alloc_pinfo(dev, ppi, n_ports);
if (!host) {
rc = -ENOMEM;
goto err0;
}

host->private_data = hpriv;

if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);

for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];

ata_port_desc(ap, "mmio %pR", mem);
ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

/* set initial link pm policy */
ap->pm_policy = NOT_AVAILABLE;

/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;

/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
ap->ops = &ata_dummy_port_ops;
}

rc = ahci_reset_controller(host);
if (rc)
goto err0;

ahci_init_controller(host);
ahci_print_info(host, "platform");

rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
&ahci_sht);
if (rc)
goto err0;

return 0;
err0:
if (pdata && pdata->exit)
pdata->exit(dev);
return rc;
}

static int __devexit ahci_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev->platform_data;
struct ata_host *host = dev_get_drvdata(dev);

ata_host_detach(host);

if (pdata && pdata->exit)
pdata->exit(dev);

return 0;
}

static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = __devexit_p(ahci_remove),
.driver = {
.name = "ahci",
.owner = THIS_MODULE,
},
};

static int __init ahci_init(void)
{
return platform_driver_probe(&ahci_driver, ahci_probe);
}
module_init(ahci_init);

static void __exit ahci_exit(void)
{
platform_driver_unregister(&ahci_driver);
}
module_exit(ahci_exit);

MODULE_DESCRIPTION("AHCI SATA platform driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ahci");
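For orientation, this is roughly what board code binding to the driver above could look like. The base address, size and IRQ number are placeholders, and the example_ names are invented; only the "ahci" device name and the ahci_platform_data fields consumed by ahci_probe() come from the code above.

        /* Hedged sketch: registering an "ahci" platform device from board code. */
        static struct resource example_ahci_resources[] = {
                {
                        .start  = 0xffe08000,           /* placeholder MMIO base */
                        .end    = 0xffe08fff,
                        .flags  = IORESOURCE_MEM,
                },
                {
                        .start  = 44,                   /* placeholder IRQ */
                        .end    = 44,
                        .flags  = IORESOURCE_IRQ,
                },
        };

        static struct ahci_platform_data example_ahci_pdata = {
                /* .init/.exit may enable clocks or PHYs; both are optional */
        };

        static struct platform_device example_ahci_device = {
                .name           = "ahci",       /* must match ahci_driver.driver.name */
                .id             = -1,
                .resource       = example_ahci_resources,
                .num_resources  = ARRAY_SIZE(example_ahci_resources),
                .dev            = {
                        .platform_data = &example_ahci_pdata,
                },
        };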
File diff suppressed because it is too large
@@ -65,6 +65,7 @@
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>

#include "libata.h"

@@ -96,7 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

@@ -1685,52 +1685,6 @@ unsigned long ata_id_xfermask(const u16 *id)
return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
* ata_pio_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
* @data: data for @fn to use
* @delay: delay time in msecs for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
* port_task. There is one port_task per port and it's the
* user(low level driver)'s responsibility to make sure that only
* one task is active at any given time.
*
* libata core layer takes care of synchronization between
* port_task and EH. ata_pio_queue_task() may be ignored for EH
* synchronization.
*
* LOCKING:
* Inherited from caller.
*/
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
ap->port_task_data = data;

/* may fail if ata_port_flush_task() in progress */
queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
}

/**
* ata_port_flush_task - Flush port_task
* @ap: The ata_port to flush port_task for
*
* After this function completes, port_task is guranteed not to
* be running or scheduled.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_port_flush_task(struct ata_port *ap)
{
DPRINTK("ENTER\n");

cancel_rearming_delayed_work(&ap->port_task);

if (ata_msg_ctl(ap))
ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
struct completion *waiting = qc->private_data;

@@ -1852,7 +1806,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,

rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

ata_port_flush_task(ap);
ata_sff_flush_pio_task(ap);

if (!rc) {
spin_lock_irqsave(ap->lock, flags);

@@ -1906,22 +1860,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->qc_active = preempted_qc_active;
ap->nr_active_links = preempted_nr_active_links;

/* XXX - Some LLDDs (sata_mv) disable port on command failure.
* Until those drivers are fixed, we detect the condition
* here, fail the command with AC_ERR_SYSTEM and reenable the
* port.
*
* Note that this doesn't change any behavior as internal
* command failure results in disabling the device in the
* higher layer for LLDDs without new reset/EH callbacks.
*
* Kill the following code as soon as those drivers are fixed.
*/
if (ap->flags & ATA_FLAG_DISABLED) {
err_mask |= AC_ERR_SYSTEM;
ata_port_probe(ap);
}

spin_unlock_irqrestore(ap->lock, flags);

if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)

@@ -2767,8 +2705,6 @@ int ata_bus_probe(struct ata_port *ap)
int rc;
struct ata_device *dev;

ata_port_probe(ap);

ata_for_each_dev(dev, &ap->link, ALL)
tries[dev->devno] = ATA_PROBE_MAX_TRIES;

@@ -2796,8 +2732,7 @@ int ata_bus_probe(struct ata_port *ap)
ap->ops->phy_reset(ap);

ata_for_each_dev(dev, &ap->link, ALL) {
if (!(ap->flags & ATA_FLAG_DISABLED) &&
dev->class != ATA_DEV_UNKNOWN)
if (dev->class != ATA_DEV_UNKNOWN)
classes[dev->devno] = dev->class;
else
classes[dev->devno] = ATA_DEV_NONE;

@@ -2805,8 +2740,6 @@ int ata_bus_probe(struct ata_port *ap)
dev->class = ATA_DEV_UNKNOWN;
}

ata_port_probe(ap);

/* read IDENTIFY page and configure devices. We have to do the identify
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */

@@ -2856,8 +2789,6 @@ int ata_bus_probe(struct ata_port *ap)
ata_for_each_dev(dev, &ap->link, ENABLED)
return 0;

/* no device present, disable port */
ata_port_disable(ap);
return -ENODEV;

fail:

@@ -2888,22 +2819,6 @@ int ata_bus_probe(struct ata_port *ap)
goto retry;
}

/**
* ata_port_probe - Mark port as enabled
* @ap: Port for which we indicate enablement
*
* Modify @ap data structure such that the system
* thinks that the entire port is enabled.
*
* LOCKING: host lock, or some other form of
* serialization.
*/

void ata_port_probe(struct ata_port *ap)
{
ap->flags &= ~ATA_FLAG_DISABLED;
}

/**
* sata_print_link_status - Print SATA link status
* @link: SATA link to printk link status about

@@ -2950,26 +2865,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
return pair;
}

/**
* ata_port_disable - Disable port.
* @ap: Port to be disabled.
*
* Modify @ap data structure such that the system
* thinks that the entire port is disabled, and should
* never attempt to probe or communicate with devices
* on this port.
*
* LOCKING: host lock, or some other form of
* serialization.
*/

void ata_port_disable(struct ata_port *ap)
{
ap->link.device[0].class = ATA_DEV_NONE;
ap->link.device[1].class = ATA_DEV_NONE;
ap->flags |= ATA_FLAG_DISABLED;
}

/**
* sata_down_spd_limit - adjust SATA spd limit downward
* @link: Link to adjust SATA spd limit for

@@ -3631,9 +3526,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
unsigned long start = jiffies;
unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
unsigned long nodev_deadline;
int warned = 0;

/* choose which 0xff timeout to use, read comment in libata.h */
if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
else
nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

/* Slave readiness can't be tested separately from master. On
* M/S emulation configuration, this function should be called
* only on the master and it will handle both master and slave.

@@ -3651,12 +3552,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
if (ready > 0)
return 0;

/* -ENODEV could be transient. Ignore -ENODEV if link
/*
* -ENODEV could be transient. Ignore -ENODEV if link
* is online. Also, some SATA devices take a long
* time to clear 0xff after reset. For example,
* HHD424020F7SV00 iVDR needs >= 800ms while Quantum
* GoVault needs even more than that. Wait for
* ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
* time to clear 0xff after reset. Wait for
* ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
* offline.
*
* Note that some PATA controllers (pata_ali) explode
* if status register is read more than once when

@@ -5557,30 +5458,6 @@ void ata_host_resume(struct ata_host *host)
}
#endif

/**
* ata_port_start - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Allocates space for PRD table.
*
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
int ata_port_start(struct ata_port *ap)
{
struct device *dev = ap->dev;

ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
GFP_KERNEL);
if (!ap->prd)
return -ENOMEM;

return 0;
}

/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize

@@ -5709,12 +5586,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)

ap->pflags |= ATA_PFLAG_INITIALIZING;
ap->lock = &host->lock;
ap->flags = ATA_FLAG_DISABLED;
ap->print_id = -1;
ap->ctl = ATA_DEVCTL_OBS;
ap->host = host;
ap->dev = host->dev;
ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
/* turn on all debugging levels */

@@ -5725,11 +5599,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

#ifdef CONFIG_ATA_SFF
INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
#else
INIT_DELAYED_WORK(&ap->port_task, NULL);
#endif
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);

@@ -5747,6 +5616,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
ata_sff_port_init(ap);

return ap;
}

@@ -6138,8 +6009,6 @@ static void async_port_probe(void *data, async_cookie_t cookie)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;

ata_port_probe(ap);

/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);

@@ -6663,62 +6532,43 @@ static void __init ata_parse_force_param(void)

static int __init ata_init(void)
{
ata_parse_force_param();
int rc = -ENOMEM;

/*
* FIXME: In UP case, there is only one workqueue thread and if you
* have more than one PIO device, latency is bloody awful, with
* occasional multi-second "hiccups" as one PIO device waits for
* another. It's an ugly wart that users DO occasionally complain
* about; luckily most users have at most one PIO polled device.
*/
ata_wq = create_workqueue("ata");
if (!ata_wq)
goto free_force_tbl;
ata_parse_force_param();

ata_aux_wq = create_singlethread_workqueue("ata_aux");
if (!ata_aux_wq)
goto free_wq;
goto fail;

rc = ata_sff_init();
if (rc)
goto fail;

printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;

free_wq:
destroy_workqueue(ata_wq);
free_force_tbl:
fail:
kfree(ata_force_tbl);
return -ENOMEM;
if (ata_aux_wq)
destroy_workqueue(ata_aux_wq);
return rc;
}

static void __exit ata_exit(void)
{
ata_sff_exit();
kfree(ata_force_tbl);
destroy_workqueue(ata_wq);
destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

int ata_ratelimit(void)
{
int rc;
unsigned long flags;

spin_lock_irqsave(&ata_ratelimit_lock, flags);

if (time_after(jiffies, ratelimit_time)) {
rc = 1;
ratelimit_time = jiffies + (HZ/5);
} else
rc = 0;

spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

return rc;
return __ratelimit(&ratelimit);
}

/**

@@ -6826,11 +6676,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);

@@ -6842,7 +6690,6 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);

@@ -6864,7 +6711,6 @@ EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_queue_task);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
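The reworked ata_ratelimit() shown earlier in this hunk series keeps its old calling convention, only the implementation moves to the generic __ratelimit() helper. A typical call-site pattern therefore stays unchanged; the sketch below is illustrative, not a hunk from this merge, and the message text is invented.

        /* Hedged sketch: drop noisy messages when they repeat too fast. */
        if (ata_ratelimit())
                ata_port_printk(ap, KERN_WARNING,
                                "spurious interrupt (irq_stat 0x%x)\n", irq_stat);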
@@ -550,8 +550,8 @@ void ata_scsi_error(struct Scsi_Host *host)

DPRINTK("ENTER\n");

/* synchronize with port task */
ata_port_flush_task(ap);
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);

/* synchronize with host lock and sort out timeouts */

@@ -3684,7 +3684,7 @@ void ata_std_error_handler(struct ata_port *ap)
ata_reset_fn_t hardreset = ops->hardreset;

/* ignore built-in hardreset if SCR access is not available */
if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
hardreset = NULL;

ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
@@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
return "<unknown>";
}

#define PMP_GSCR_SII_POL 129

static int sata_pmp_configure(struct ata_device *dev, int print_info)
{
struct ata_port *ap = dev->link->ap;
u32 *gscr = dev->gscr;
u16 vendor = sata_pmp_gscr_vendor(gscr);
u16 devid = sata_pmp_gscr_devid(gscr);
unsigned int err_mask = 0;
const char *reason;
int nr_ports, rc;

@@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
goto fail;
}

/* Disable sending Early R_OK.
* With "cached read" HDD testing and multiple ports busy on a SATA
* host controller, 3726 PMP will very rarely drop a deferred
* R_OK that was intended for the host. Symptom will be all
* 5 drives under test will timeout, get reset, and recover.
*/
if (vendor == 0x1095 && devid == 0x3726) {
u32 reg;

err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
if (err_mask) {
rc = -EIO;
reason = "failed to read Sil3726 Private Register";
goto fail;
}
reg &= ~0x1;
err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
if (err_mask) {
rc = -EIO;
reason = "failed to write Sil3726 Private Register";
goto fail;
}
}

if (print_info) {
ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
"0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
sata_pmp_spec_rev_str(gscr),
sata_pmp_gscr_vendor(gscr),
sata_pmp_gscr_devid(gscr),
sata_pmp_spec_rev_str(gscr), vendor, devid,
sata_pmp_gscr_rev(gscr),
nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
gscr[SATA_PMP_GSCR_FEAT]);
@@ -3345,9 +3345,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
struct ata_link *link;
struct ata_device *dev;

if (ap->flags & ATA_FLAG_DISABLED)
return;

repeat:
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
File diff suppressed because it is too large
@@ -38,17 +38,6 @@ struct ata_scsi_args {
void (*done)(struct scsi_cmnd *);
};

static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
{
if (reset == sata_std_hardreset)
return 1;
#ifdef CONFIG_ATA_SFF
if (reset == sata_sff_hardreset)
return 1;
#endif
return 0;
}

/* libata-core.c */
enum {
/* flags for ata_dev_read_id() */

@@ -79,7 +68,6 @@ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern void ata_port_flush_task(struct ata_port *ap);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,

@@ -202,10 +190,19 @@ static inline int sata_pmp_attach(struct ata_device *dev)

/* libata-sff.c */
#ifdef CONFIG_ATA_SFF
extern void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep);
extern u8 ata_irq_on(struct ata_port *ap);
extern void ata_pio_task(struct work_struct *work);
extern void ata_sff_flush_pio_task(struct ata_port *ap);
extern void ata_sff_port_init(struct ata_port *ap);
extern int ata_sff_init(void);
extern void ata_sff_exit(void);
#else /* CONFIG_ATA_SFF */
static inline void ata_sff_flush_pio_task(struct ata_port *ap)
{ }
static inline void ata_sff_port_init(struct ata_port *ap)
{ }
static inline int ata_sff_init(void)
{ return 0; }
static inline void ata_sff_exit(void)
{ }
#endif /* CONFIG_ATA_SFF */

#endif /* __LIBATA_H__ */
@@ -101,7 +101,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device
static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
{
struct pata_acpi *acpi = adev->link->ap->private_data;
return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]);
return mask & acpi->mask[adev->devno];
}

/**

@@ -172,7 +172,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
struct pata_acpi *acpi = ap->private_data;

if (acpi->gtm.flags & 0x10)
return ata_sff_qc_issue(qc);
return ata_bmdma_qc_issue(qc);

if (adev != acpi->last) {
pacpi_set_piomode(ap, adev);

@@ -180,7 +180,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
pacpi_set_dmamode(ap, adev);
acpi->last = adev;
}
return ata_sff_qc_issue(qc);
return ata_bmdma_qc_issue(qc);
}

/**

@@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap)
return -ENOMEM;
acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
ret = ata_sff_port_start(ap);
ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -124,7 +124,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
return mask &= ~ATA_MASK_UDMA;
return ata_bmdma_mode_filter(adev, mask);
return mask;
}

/**
@@ -202,7 +202,6 @@ static struct ata_port_operations pata_at91_port_ops = {
.sff_data_xfer = pata_at91_data_xfer_noirq,
.set_piomode = pata_at91_set_piomode,
.cable_detect = ata_cable_40wire,
.port_start = ATA_OP_NULL,
};

static int __devinit pata_at91_probe(struct platform_device *pdev)
@@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = {
static struct ata_port_operations atiixp_port_ops = {
.inherits = &ata_bmdma_port_ops,

.qc_prep = ata_sff_dumb_qc_prep,
.qc_prep = ata_bmdma_dumb_qc_prep,
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
@@ -820,6 +820,18 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device)
ata_sff_pause(ap);
}

/**
* bfin_set_devctl - Write device control reg
* @ap: port where the device is
* @ctl: value to write
*/

static u8 bfin_set_devctl(struct ata_port *ap, u8 ctl)
{
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
write_atapi_register(base, ATA_REG_CTRL, ctl);
}

/**
* bfin_bmdma_setup - Set up IDE DMA transaction
* @qc: Info associated with this ATA transaction.

@@ -1215,56 +1227,6 @@ static void bfin_irq_clear(struct ata_port *ap)
| MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
}

/**
* bfin_irq_on - Enable interrupts on a port.
* @ap: Port on which interrupts are enabled.
*
* Note: Original code is ata_sff_irq_on().
*/

static unsigned char bfin_irq_on(struct ata_port *ap)
{
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
u8 tmp;

dev_dbg(ap->dev, "in atapi irq on\n");
ap->ctl &= ~ATA_NIEN;
ap->last_ctl = ap->ctl;

write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
tmp = ata_wait_idle(ap);

bfin_irq_clear(ap);

return tmp;
}

/**
* bfin_freeze - Freeze DMA controller port
* @ap: port to freeze
*
* Note: Original code is ata_sff_freeze().
*/

static void bfin_freeze(struct ata_port *ap)
{
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

dev_dbg(ap->dev, "in atapi dma freeze\n");
ap->ctl |= ATA_NIEN;
ap->last_ctl = ap->ctl;

write_atapi_register(base, ATA_REG_CTRL, ap->ctl);

/* Under certain circumstances, some controllers raise IRQ on
* ATA_NIEN manipulation. Also, many controllers fail to mask
* previously pending IRQ on ATA_NIEN assertion. Clear it.
*/
ap->ops->sff_check_status(ap);

bfin_irq_clear(ap);
}

/**
* bfin_thaw - Thaw DMA controller port
* @ap: port to thaw

@@ -1276,7 +1238,7 @@ void bfin_thaw(struct ata_port *ap)
{
dev_dbg(ap->dev, "in atapi dma thaw\n");
bfin_check_status(ap);
bfin_irq_on(ap);
ata_sff_irq_on(ap);
}

/**

@@ -1293,7 +1255,7 @@ static void bfin_postreset(struct ata_link *link, unsigned int *classes)
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;

/* re-enable interrupts */
bfin_irq_on(ap);
ata_sff_irq_on(ap);

/* is double-select really necessary? */
if (classes[0] != ATA_DEV_NONE)

@@ -1438,18 +1400,12 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);

for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
struct ata_port *ap = host->ports[i];
struct ata_queued_cmd *qc;

ap = host->ports[i];
if (ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;

qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
(qc->flags & ATA_QCFLAG_ACTIVE))
handled |= bfin_ata_host_intr(ap, qc);
}
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled |= bfin_ata_host_intr(ap, qc);
}

spin_unlock_irqrestore(&host->lock, flags);

@@ -1465,7 +1421,7 @@ static struct scsi_host_template bfin_sht = {
};

static struct ata_port_operations bfin_pata_ops = {
.inherits = &ata_sff_port_ops,
.inherits = &ata_bmdma_port_ops,

.set_piomode = bfin_set_piomode,
.set_dmamode = bfin_set_dmamode,

@@ -1476,6 +1432,7 @@ static struct ata_port_operations bfin_pata_ops = {
.sff_check_status = bfin_check_status,
.sff_check_altstatus = bfin_check_altstatus,
.sff_dev_select = bfin_dev_select,
.sff_set_devctl = bfin_set_devctl,

.bmdma_setup = bfin_bmdma_setup,
.bmdma_start = bfin_bmdma_start,

@@ -1485,13 +1442,11 @@ static struct ata_port_operations bfin_pata_ops = {

.qc_prep = ata_noop_qc_prep,

.freeze = bfin_freeze,
.thaw = bfin_thaw,
.softreset = bfin_softreset,
.postreset = bfin_postreset,

.sff_irq_clear = bfin_irq_clear,
.sff_irq_on = bfin_irq_on,

.port_start = bfin_port_start,
.port_stop = bfin_port_stop,
@@ -153,24 +153,20 @@ static int cmd640_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct cmd640_reg *timing;

int ret = ata_sff_port_start(ap);
if (ret < 0)
return ret;

timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
if (timing == NULL)
return -ENOMEM;
timing->last = -1; /* Force a load */
ap->private_data = timing;
return ret;
return 0;
}

static struct scsi_host_template cmd640_sht = {
ATA_BMDMA_SHT(DRV_NAME),
ATA_PIO_SHT(DRV_NAME),
};

static struct ata_port_operations cmd640_port_ops = {
.inherits = &ata_bmdma_port_ops,
.inherits = &ata_sff_port_ops,
/* In theory xfer_noirq is not needed once we kill the prefetcher */
.sff_data_xfer = ata_sff_data_xfer_noirq,
.qc_issue = cmd640_qc_issue,

@@ -181,13 +177,10 @@ static struct ata_port_operations cmd640_port_ops = {

static void cmd640_hardware_init(struct pci_dev *pdev)
{
u8 r;
u8 ctrl;

/* CMD640 detected, commiserations */
pci_write_config_byte(pdev, 0x5B, 0x00);
/* Get version info */
pci_read_config_byte(pdev, CFR, &r);
/* PIO0 command cycles */
pci_write_config_byte(pdev, CMDTIM, 0);
/* 512 byte bursts (sector) */
@@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = {

static struct ata_port_operations cs5520_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_sff_dumb_qc_prep,
.qc_prep = ata_bmdma_dumb_qc_prep,
.cable_detect = ata_cable_40wire,
.set_piomode = cs5520_set_piomode,
};
@@ -156,7 +156,7 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
cs5530_set_dmamode(ap, adev);
}

return ata_sff_qc_issue(qc);
return ata_bmdma_qc_issue(qc);
}

static struct scsi_host_template cs5530_sht = {

@@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = {
static struct ata_port_operations cs5530_port_ops = {
.inherits = &ata_bmdma_port_ops,

.qc_prep = ata_sff_dumb_qc_prep,
.qc_prep = ata_bmdma_dumb_qc_prep,
.qc_issue = cs5530_qc_issue,

.cable_detect = ata_cable_40wire,
@@ -182,7 +182,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
} else if (adev->class == ATA_DEV_ATAPI)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);

return ata_bmdma_mode_filter(adev, mask);
return mask;
}

static int hpt36x_cable_detect(struct ata_port *ap)
@@ -282,7 +282,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return ata_bmdma_mode_filter(adev, mask);
return mask;
}

/**

@@ -298,7 +298,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
}
return ata_bmdma_mode_filter(adev, mask);
return mask;
}

/**
@@ -320,7 +320,7 @@ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)

hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
}
return ata_sff_qc_issue(qc);
return ata_bmdma_qc_issue(qc);
}

static struct scsi_host_template hpt3x2n_sht = {
@@ -321,7 +321,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
}

static struct ata_port_operations pata_icside_port_ops = {
.inherits = &ata_sff_port_ops,
.inherits = &ata_bmdma_port_ops,
/* no need to build any PRD tables for DMA */
.qc_prep = ata_noop_qc_prep,
.sff_data_xfer = ata_sff_data_xfer_noirq,

@@ -333,7 +333,8 @@ static struct ata_port_operations pata_icside_port_ops = {
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
.postreset = pata_icside_postreset,
.post_internal_cmd = pata_icside_bmdma_stop,

.port_start = ATA_OP_NULL, /* don't need PRD table */
};

static void __devinit
@@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
case 0xFC: /* Internal 'report rebuild state' */
/* Arguably should just no-op this one */
case ATA_CMD_SET_FEATURES:
return ata_sff_qc_issue(qc);
return ata_bmdma_qc_issue(qc);
}
printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
return AC_ERR_DEV;

@@ -448,7 +448,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
{
it821x_passthru_dev_select(qc->ap, qc->dev->devno);
return ata_sff_qc_issue(qc);
return ata_bmdma_qc_issue(qc);
}

/**

@@ -739,7 +739,7 @@ static int it821x_port_start(struct ata_port *ap)
struct it821x_dev *itdev;
u8 conf;

int ret = ata_sff_port_start(ap);
int ret = ata_bmdma_port_start(ap);
if (ret < 0)
return ret;
@@ -720,6 +720,8 @@ static int pata_macio_port_start(struct ata_port *ap)
if (priv->dma_table_cpu == NULL) {
dev_err(priv->dev, "Unable to allocate DMA command list\n");
ap->ioaddr.bmdma_addr = NULL;
ap->mwdma_mask = 0;
ap->udma_mask = 0;
}
return 0;
}

@@ -917,7 +919,7 @@ static struct scsi_host_template pata_macio_sht = {
};

static struct ata_port_operations pata_macio_ops = {
.inherits = &ata_sff_port_ops,
.inherits = &ata_bmdma_port_ops,

.freeze = pata_macio_freeze,
.set_piomode = pata_macio_set_timings,

@@ -925,7 +927,6 @@ static struct ata_port_operations pata_macio_ops = {
.cable_detect = pata_macio_cable_detect,
.sff_dev_select = pata_macio_dev_select,
.qc_prep = pata_macio_qc_prep,
.mode_filter = ata_bmdma_mode_filter,
.bmdma_setup = pata_macio_bmdma_setup,
.bmdma_start = pata_macio_bmdma_start,
.bmdma_stop = pata_macio_bmdma_stop,
@ -64,13 +64,13 @@ struct mpc52xx_ata_priv {

/* ATAPI-4 PIO specs (in ns) */
static const int ataspec_t0[5] = {600, 383, 240, 180, 120};
static const int ataspec_t1[5] = { 70, 50, 30, 30, 25};
static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70};
static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70};
static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25};
static const int ataspec_t4[5] = { 30, 20, 15, 10, 10};
static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120};
static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25};
static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70};
static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70};
static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25};
static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10};
static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35};

#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
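CALC_CLKCYC above is a plain ceiling division: it converts a spec time into the smallest whole number of clock periods that covers it. A standalone illustration with made-up numbers (the values are not taken from the driver):

#include <stdio.h>

/* Same rounding-up macro as in the hunk above: v time units on a clock
 * whose period is c of the same units. */
#define CALC_CLKCYC(c, v) ((((v) + (c) - 1) / (c)))

int main(void)
{
	/* 600 time units on a 7-unit period: 600/7 = 85.7, so 86 cycles. */
	printf("%u\n", (unsigned int)CALC_CLKCYC(7, 600));	/* prints 86 */
	return 0;
}
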
@ -78,13 +78,13 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
|
|||
|
||||
/* ATAPI-4 MDMA specs (in clocks) */
|
||||
struct mdmaspec {
|
||||
u32 t0M;
|
||||
u32 td;
|
||||
u32 th;
|
||||
u32 tj;
|
||||
u32 tkw;
|
||||
u32 tm;
|
||||
u32 tn;
|
||||
u8 t0M;
|
||||
u8 td;
|
||||
u8 th;
|
||||
u8 tj;
|
||||
u8 tkw;
|
||||
u8 tm;
|
||||
u8 tn;
|
||||
};
|
||||
|
||||
static const struct mdmaspec mdmaspec66[3] = {
|
||||
|
@ -101,23 +101,23 @@ static const struct mdmaspec mdmaspec132[3] = {
|
|||
|
||||
/* ATAPI-4 UDMA specs (in clocks) */
|
||||
struct udmaspec {
|
||||
u32 tcyc;
|
||||
u32 t2cyc;
|
||||
u32 tds;
|
||||
u32 tdh;
|
||||
u32 tdvs;
|
||||
u32 tdvh;
|
||||
u32 tfs;
|
||||
u32 tli;
|
||||
u32 tmli;
|
||||
u32 taz;
|
||||
u32 tzah;
|
||||
u32 tenv;
|
||||
u32 tsr;
|
||||
u32 trfs;
|
||||
u32 trp;
|
||||
u32 tack;
|
||||
u32 tss;
|
||||
u8 tcyc;
|
||||
u8 t2cyc;
|
||||
u8 tds;
|
||||
u8 tdh;
|
||||
u8 tdvs;
|
||||
u8 tdvh;
|
||||
u8 tfs;
|
||||
u8 tli;
|
||||
u8 tmli;
|
||||
u8 taz;
|
||||
u8 tzah;
|
||||
u8 tenv;
|
||||
u8 tsr;
|
||||
u8 trfs;
|
||||
u8 trp;
|
||||
u8 tack;
|
||||
u8 tss;
|
||||
};
|
||||
|
||||
static const struct udmaspec udmaspec66[6] = {
|
||||
|
@ -270,7 +270,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
|
|||
{
|
||||
struct mpc52xx_ata_timings *timing = &priv->timings[dev];
|
||||
unsigned int ipb_period = priv->ipb_period;
|
||||
unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
|
||||
u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
|
||||
|
||||
if ((pio < 0) || (pio > 4))
|
||||
return -EINVAL;
|
||||
|
@ -299,8 +299,8 @@ mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
|
|||
if (speed < 0 || speed > 2)
|
||||
return -EINVAL;
|
||||
|
||||
t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm);
|
||||
t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8);
|
||||
t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm;
|
||||
t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8);
|
||||
t->using_udma = 0;
|
||||
|
||||
return 0;
|
||||
|
@ -316,11 +316,11 @@ mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
	if (speed < 0 || speed > 2)
		return -EINVAL;

	t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh;
	t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli;
	t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr;
	t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack;
	t->udma5 = (s->tzah << 24);
	t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh;
	t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli;
	t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) | ((u32)s->tenv << 8) | s->tsr;
	t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack;
	t->udma5 = (u32)s->tzah << 24;
	t->using_udma = 1;

	return 0;
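With the timing fields narrowed to u8 in the hunks above, the explicit (u32) casts become necessary: a u8 operand is promoted to a signed int before <<, and shifting a byte value into bits 31:24 of an int can overflow. A distilled sketch of the packing pattern (pack_timing() is illustrative, not driver code):

#include <stdint.h>

/* Pack four 8-bit timing fields into one 32-bit register image.  The
 * casts widen each byte to an unsigned 32-bit value before shifting,
 * so e.g. 0xC8 << 24 cannot overflow a signed int. */
static inline uint32_t pack_timing(uint8_t b3, uint8_t b2,
				   uint8_t b1, uint8_t b0)
{
	return ((uint32_t)b3 << 24) | ((uint32_t)b2 << 16) |
	       ((uint32_t)b1 << 8) | b0;
}
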
@ -126,7 +126,7 @@ static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
|
|||
|
||||
/* load PRD table addr. */
|
||||
mb(); /* make sure PRD table writes are visible to controller */
|
||||
iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
|
||||
iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
|
||||
|
||||
/* specify data direction, triple-check start bit is clear */
|
||||
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
|
|
|
@ -489,9 +489,8 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
|
|||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
static u8 octeon_cf_irq_on(struct ata_port *ap)
|
||||
static void octeon_cf_irq_on(struct ata_port *ap)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void octeon_cf_irq_clear(struct ata_port *ap)
|
||||
|
@ -655,9 +654,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
|
|||
ap = host->ports[i];
|
||||
ocd = ap->dev->platform_data;
|
||||
|
||||
if (ap->flags & ATA_FLAG_DISABLED)
|
||||
continue;
|
||||
|
||||
ocd = ap->dev->platform_data;
|
||||
cf_port = ap->private_data;
|
||||
dma_int.u64 =
|
||||
|
@ -667,8 +663,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
|
|||
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
|
||||
(qc->flags & ATA_QCFLAG_ACTIVE)) {
|
||||
if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
|
||||
if (dma_int.s.done && !dma_cfg.s.en) {
|
||||
if (!sg_is_last(qc->cursg)) {
|
||||
qc->cursg = sg_next(qc->cursg);
|
||||
|
@ -738,8 +733,7 @@ static void octeon_cf_delayed_finish(struct work_struct *work)
|
|||
goto out;
|
||||
}
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
|
||||
(qc->flags & ATA_QCFLAG_ACTIVE))
|
||||
if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
|
||||
octeon_cf_dma_finished(ap, qc);
|
||||
out:
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
|
|
@ -200,7 +200,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
|
|||
if (ata_dma_enabled(adev))
|
||||
oldpiix_set_dmamode(ap, adev);
|
||||
}
|
||||
return ata_sff_qc_issue(qc);
|
||||
return ata_bmdma_qc_issue(qc);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -165,7 +165,7 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
|
|||
.sff_data_xfer = ata_data_xfer_8bit,
|
||||
.cable_detect = ata_cable_40wire,
|
||||
.set_mode = pcmcia_set_mode_8bit,
|
||||
.drain_fifo = pcmcia_8bit_drain_fifo,
|
||||
.sff_drain_fifo = pcmcia_8bit_drain_fifo,
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -265,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
|
|||
struct ata_device *pair = ata_dev_pair(adev);
|
||||
|
||||
if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
|
||||
return ata_bmdma_mode_filter(adev, mask);
|
||||
return mask;
|
||||
|
||||
/* Check for slave of a Maxtor at UDMA6 */
|
||||
ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
|
||||
|
@ -274,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
|
|||
if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6)
|
||||
mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
|
||||
|
||||
return ata_bmdma_mode_filter(adev, mask);
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -249,7 +249,7 @@ static int pdc2026x_port_start(struct ata_port *ap)
|
|||
u8 burst = ioread8(bmdma + 0x1f);
|
||||
iowrite8(burst | 0x01, bmdma + 0x1f);
|
||||
}
|
||||
return ata_sff_port_start(ap);
|
||||
return ata_bmdma_port_start(ap);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -53,7 +53,6 @@ static struct ata_port_operations pata_platform_port_ops = {
|
|||
.sff_data_xfer = ata_sff_data_xfer_noirq,
|
||||
.cable_detect = ata_cable_unknown,
|
||||
.set_mode = pata_platform_set_mode,
|
||||
.port_start = ATA_OP_NULL,
|
||||
};
|
||||
|
||||
static void pata_platform_setup_port(struct ata_ioports *ioaddr,
|
||||
|
|
|
@ -179,7 +179,7 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
|
|||
radisys_set_piomode(ap, adev);
|
||||
}
|
||||
}
|
||||
return ata_sff_qc_issue(qc);
|
||||
return ata_bmdma_qc_issue(qc);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -174,7 +174,7 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
|
|||
sc1200_set_dmamode(ap, adev);
|
||||
}
|
||||
|
||||
return ata_sff_qc_issue(qc);
|
||||
return ata_bmdma_qc_issue(qc);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = {
|
|||
|
||||
static struct ata_port_operations sc1200_port_ops = {
|
||||
.inherits = &ata_bmdma_port_ops,
|
||||
.qc_prep = ata_sff_dumb_qc_prep,
|
||||
.qc_prep = ata_bmdma_dumb_qc_prep,
|
||||
.qc_issue = sc1200_qc_issue,
|
||||
.qc_defer = sc1200_qc_defer,
|
||||
.cable_detect = ata_cable_40wire,
|
||||
|
|
|
@ -265,7 +265,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
|
|||
printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
|
||||
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
|
||||
}
|
||||
return ata_bmdma_mode_filter(adev, mask);
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -415,6 +415,17 @@ static void scc_dev_select (struct ata_port *ap, unsigned int device)
|
|||
ata_sff_pause(ap);
|
||||
}
|
||||
|
||||
/**
|
||||
* scc_set_devctl - Write device control reg
|
||||
* @ap: port where the device is
|
||||
* @ctl: value to write
|
||||
*/
|
||||
|
||||
static void scc_set_devctl(struct ata_port *ap, u8 ctl)
|
||||
{
|
||||
out_be32(ap->ioaddr.ctl_addr, ctl);
|
||||
}
|
||||
|
||||
/**
|
||||
* scc_bmdma_setup - Set up PCI IDE BMDMA transaction
|
||||
* @qc: Info associated with this ATA transaction.
|
||||
|
@ -430,7 +441,7 @@ static void scc_bmdma_setup (struct ata_queued_cmd *qc)
|
|||
void __iomem *mmio = ap->ioaddr.bmdma_addr;
|
||||
|
||||
/* load PRD table addr */
|
||||
out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);
|
||||
out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
|
||||
|
||||
/* specify data direction, triple-check start bit is clear */
|
||||
dmactl = in_be32(mmio + SCC_DMA_CMD);
|
||||
|
@ -501,8 +512,8 @@ static unsigned int scc_devchk (struct ata_port *ap,
|
|||
* Note: Original code is ata_sff_wait_after_reset
|
||||
*/
|
||||
|
||||
int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
|
||||
unsigned long deadline)
|
||||
static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
|
||||
unsigned long deadline)
|
||||
{
|
||||
struct ata_port *ap = link->ap;
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
|
@ -816,54 +827,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
|
|||
return words << 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* scc_irq_on - Enable interrupts on a port.
|
||||
* @ap: Port on which interrupts are enabled.
|
||||
*
|
||||
* Note: Original code is ata_sff_irq_on().
|
||||
*/
|
||||
|
||||
static u8 scc_irq_on (struct ata_port *ap)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
u8 tmp;
|
||||
|
||||
ap->ctl &= ~ATA_NIEN;
|
||||
ap->last_ctl = ap->ctl;
|
||||
|
||||
out_be32(ioaddr->ctl_addr, ap->ctl);
|
||||
tmp = ata_wait_idle(ap);
|
||||
|
||||
ap->ops->sff_irq_clear(ap);
|
||||
|
||||
return tmp;
|
||||
}
|
||||
|
||||
/**
|
||||
* scc_freeze - Freeze BMDMA controller port
|
||||
* @ap: port to freeze
|
||||
*
|
||||
* Note: Original code is ata_sff_freeze().
|
||||
*/
|
||||
|
||||
static void scc_freeze (struct ata_port *ap)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
|
||||
ap->ctl |= ATA_NIEN;
|
||||
ap->last_ctl = ap->ctl;
|
||||
|
||||
out_be32(ioaddr->ctl_addr, ap->ctl);
|
||||
|
||||
/* Under certain circumstances, some controllers raise IRQ on
|
||||
* ATA_NIEN manipulation. Also, many controllers fail to mask
|
||||
* previously pending IRQ on ATA_NIEN assertion. Clear it.
|
||||
*/
|
||||
ap->ops->sff_check_status(ap);
|
||||
|
||||
ap->ops->sff_irq_clear(ap);
|
||||
}
|
||||
|
||||
/**
|
||||
* scc_pata_prereset - prepare for reset
|
||||
* @ap: ATA port to be reset
|
||||
|
@ -903,8 +866,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
|
|||
}
|
||||
|
||||
/* set up device control */
|
||||
if (ap->ioaddr.ctl_addr)
|
||||
out_be32(ap->ioaddr.ctl_addr, ap->ctl);
|
||||
out_be32(ap->ioaddr.ctl_addr, ap->ctl);
|
||||
|
||||
DPRINTK("EXIT\n");
|
||||
}
|
||||
|
@ -930,7 +892,7 @@ static void scc_irq_clear (struct ata_port *ap)
|
|||
* scc_port_start - Set port up for dma.
|
||||
* @ap: Port to initialize
|
||||
*
|
||||
* Allocate space for PRD table using ata_port_start().
|
||||
* Allocate space for PRD table using ata_bmdma_port_start().
|
||||
* Set PRD table address for PTERADD. (PRD Transfer End Read)
|
||||
*/
|
||||
|
||||
|
@ -939,11 +901,11 @@ static int scc_port_start (struct ata_port *ap)
|
|||
void __iomem *mmio = ap->ioaddr.bmdma_addr;
|
||||
int rc;
|
||||
|
||||
rc = ata_port_start(ap);
|
||||
rc = ata_bmdma_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
|
||||
out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -978,6 +940,7 @@ static struct ata_port_operations scc_pata_ops = {
|
|||
.sff_check_status = scc_check_status,
|
||||
.sff_check_altstatus = scc_check_altstatus,
|
||||
.sff_dev_select = scc_dev_select,
|
||||
.sff_set_devctl = scc_set_devctl,
|
||||
|
||||
.bmdma_setup = scc_bmdma_setup,
|
||||
.bmdma_start = scc_bmdma_start,
|
||||
|
@ -985,14 +948,11 @@ static struct ata_port_operations scc_pata_ops = {
|
|||
.bmdma_status = scc_bmdma_status,
|
||||
.sff_data_xfer = scc_data_xfer,
|
||||
|
||||
.freeze = scc_freeze,
|
||||
.prereset = scc_pata_prereset,
|
||||
.softreset = scc_softreset,
|
||||
.postreset = scc_postreset,
|
||||
.post_internal_cmd = scc_bmdma_stop,
|
||||
|
||||
.sff_irq_clear = scc_irq_clear,
|
||||
.sff_irq_on = scc_irq_on,
|
||||
|
||||
.port_start = scc_port_start,
|
||||
.port_stop = scc_port_stop,
|
||||
|
|
|
@ -174,22 +174,12 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
	struct ata_host *host;
	int rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "version " DRV_VERSION "\n");

	/* enable device and prepare host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
	return ata_pci_sff_init_one(pdev, ppi, &sch_sht, NULL, 0);
}

static int __init sch_init(void)
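The sch hunk above collapses the open-coded enable/prepare/activate sequence into a single ata_pci_sff_init_one() call. A minimal probe for a hypothetical driver in the same style; foo_port_info and foo_sht are placeholders, and the NULL/0 arguments mirror the call in the hunk:

static int __devinit foo_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };

	return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL, 0);
}
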
@ -198,7 +198,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l
|
|||
{
|
||||
if (adev->class == ATA_DEV_ATA)
|
||||
mask &= ~ATA_MASK_UDMA;
|
||||
return ata_bmdma_mode_filter(adev, mask);
|
||||
return mask;
|
||||
}
|
||||
|
||||
|
||||
|
@ -218,7 +218,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
|
|||
|
||||
/* Disk, UDMA */
|
||||
if (adev->class != ATA_DEV_ATA)
|
||||
return ata_bmdma_mode_filter(adev, mask);
|
||||
return mask;
|
||||
|
||||
/* Actually do need to check */
|
||||
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
|
||||
|
@ -227,7 +227,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
|
|||
if (!strcmp(p, model_num))
|
||||
mask &= ~(0xE0 << ATA_SHIFT_UDMA);
|
||||
}
|
||||
return ata_bmdma_mode_filter(adev, mask);
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -190,15 +190,37 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
|
|||
pci_write_config_word(pdev, ua, ultra);
|
||||
}
|
||||
|
||||
/**
|
||||
* sil680_sff_exec_command - issue ATA command to host controller
|
||||
* @ap: port to which command is being issued
|
||||
* @tf: ATA taskfile register set
|
||||
*
|
||||
* Issues ATA command, with proper synchronization with interrupt
|
||||
* handler / other threads. Use our MMIO space for PCI posting to avoid
|
||||
* a hideously slow cycle all the way to the device.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
void sil680_sff_exec_command(struct ata_port *ap,
|
||||
const struct ata_taskfile *tf)
|
||||
{
|
||||
DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
|
||||
iowrite8(tf->command, ap->ioaddr.command_addr);
|
||||
ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
}
|
||||
|
||||
static struct scsi_host_template sil680_sht = {
|
||||
ATA_BMDMA_SHT(DRV_NAME),
|
||||
};
|
||||
|
||||
|
||||
static struct ata_port_operations sil680_port_ops = {
|
||||
.inherits = &ata_bmdma32_port_ops,
|
||||
.cable_detect = sil680_cable_detect,
|
||||
.set_piomode = sil680_set_piomode,
|
||||
.set_dmamode = sil680_set_dmamode,
|
||||
.inherits = &ata_bmdma32_port_ops,
|
||||
.sff_exec_command = sil680_sff_exec_command,
|
||||
.cable_detect = sil680_cable_detect,
|
||||
.set_piomode = sil680_set_piomode,
|
||||
.set_dmamode = sil680_set_dmamode,
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
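The sil680 hunk above posts the taskfile command and then flushes the posted MMIO write by reading a register in the controller's own BMDMA space, instead of paying for a slow legacy cycle all the way to the device. The same pattern in isolation, as a sketch (foo_exec_command() is illustrative, not the real driver):

static void foo_exec_command(struct ata_port *ap,
			     const struct ata_taskfile *tf)
{
	iowrite8(tf->command, ap->ioaddr.command_addr);
	/* Read back from the controller's fast MMIO space purely to
	 * flush the posted write; the value itself is ignored. */
	ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
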
@ -355,7 +355,7 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
			mask &= ~ ATA_MASK_UDMA;
		}
	}
	return ata_bmdma_mode_filter(dev, mask);
	return mask;
}

/**
@ -417,8 +417,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
|
|||
tf->lbam,
|
||||
tf->lbah);
|
||||
}
|
||||
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
static int via_port_start(struct ata_port *ap)
|
||||
|
@ -426,7 +424,7 @@ static int via_port_start(struct ata_port *ap)
|
|||
struct via_port *vp;
|
||||
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
|
||||
|
||||
int ret = ata_sff_port_start(ap);
|
||||
int ret = ata_bmdma_port_start(ap);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
|
|||
VPRINTK("ENTER\n");
|
||||
|
||||
adma_enter_reg_mode(qc->ap);
|
||||
if (qc->tf.protocol != ATA_PROT_DMA) {
|
||||
ata_sff_qc_prep(qc);
|
||||
if (qc->tf.protocol != ATA_PROT_DMA)
|
||||
return;
|
||||
}
|
||||
|
||||
buf[i++] = 0; /* Response flags */
|
||||
buf[i++] = 0; /* reserved */
|
||||
|
@ -442,8 +440,6 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host)
|
|||
continue;
|
||||
handled = 1;
|
||||
adma_enter_reg_mode(ap);
|
||||
if (ap->flags & ATA_FLAG_DISABLED)
|
||||
continue;
|
||||
pp = ap->private_data;
|
||||
if (!pp || pp->state != adma_state_pkt)
|
||||
continue;
|
||||
|
@ -484,42 +480,38 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
|
|||
unsigned int handled = 0, port_no;
|
||||
|
||||
for (port_no = 0; port_no < host->n_ports; ++port_no) {
|
||||
struct ata_port *ap;
|
||||
ap = host->ports[port_no];
|
||||
if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
|
||||
struct ata_queued_cmd *qc;
|
||||
struct adma_port_priv *pp = ap->private_data;
|
||||
if (!pp || pp->state != adma_state_mmio)
|
||||
struct ata_port *ap = host->ports[port_no];
|
||||
struct adma_port_priv *pp = ap->private_data;
|
||||
struct ata_queued_cmd *qc;
|
||||
|
||||
if (!pp || pp->state != adma_state_mmio)
|
||||
continue;
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
|
||||
|
||||
/* check main status, clearing INTRQ */
|
||||
u8 status = ata_sff_check_status(ap);
|
||||
if ((status & ATA_BUSY))
|
||||
continue;
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
|
||||
DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
|
||||
ap->print_id, qc->tf.protocol, status);
|
||||
|
||||
/* check main status, clearing INTRQ */
|
||||
u8 status = ata_sff_check_status(ap);
|
||||
if ((status & ATA_BUSY))
|
||||
continue;
|
||||
DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
|
||||
ap->print_id, qc->tf.protocol, status);
|
||||
/* complete taskfile transaction */
|
||||
pp->state = adma_state_idle;
|
||||
qc->err_mask |= ac_err_mask(status);
|
||||
if (!qc->err_mask)
|
||||
ata_qc_complete(qc);
|
||||
else {
|
||||
struct ata_eh_info *ehi = &ap->link.eh_info;
|
||||
ata_ehi_clear_desc(ehi);
|
||||
ata_ehi_push_desc(ehi, "status 0x%02X", status);
|
||||
|
||||
/* complete taskfile transaction */
|
||||
pp->state = adma_state_idle;
|
||||
qc->err_mask |= ac_err_mask(status);
|
||||
if (!qc->err_mask)
|
||||
ata_qc_complete(qc);
|
||||
else {
|
||||
struct ata_eh_info *ehi =
|
||||
&ap->link.eh_info;
|
||||
ata_ehi_clear_desc(ehi);
|
||||
ata_ehi_push_desc(ehi,
|
||||
"status 0x%02X", status);
|
||||
|
||||
if (qc->err_mask == AC_ERR_DEV)
|
||||
ata_port_abort(ap);
|
||||
else
|
||||
ata_port_freeze(ap);
|
||||
}
|
||||
handled = 1;
|
||||
if (qc->err_mask == AC_ERR_DEV)
|
||||
ata_port_abort(ap);
|
||||
else
|
||||
ata_port_freeze(ap);
|
||||
}
|
||||
handled = 1;
|
||||
}
|
||||
}
|
||||
return handled;
|
||||
|
@ -562,11 +554,7 @@ static int adma_port_start(struct ata_port *ap)
|
|||
{
|
||||
struct device *dev = ap->host->dev;
|
||||
struct adma_port_priv *pp;
|
||||
int rc;
|
||||
|
||||
rc = ata_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
adma_enter_reg_mode(ap);
|
||||
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
|
||||
if (!pp)
|
||||
|
|
|
@ -415,22 +415,11 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
|
|||
|
||||
spin_lock(&host->lock);
|
||||
|
||||
for (i = 0; i < NR_PORTS; i++) {
|
||||
struct ata_port *ap = host->ports[i];
|
||||
|
||||
if (!(host_irq_stat & (HIRQ_PORT0 << i)))
|
||||
continue;
|
||||
|
||||
if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
|
||||
inic_host_intr(ap);
|
||||
for (i = 0; i < NR_PORTS; i++)
|
||||
if (host_irq_stat & (HIRQ_PORT0 << i)) {
|
||||
inic_host_intr(host->ports[i]);
|
||||
handled++;
|
||||
} else {
|
||||
if (ata_ratelimit())
|
||||
dev_printk(KERN_ERR, host->dev, "interrupt "
|
||||
"from disabled port %d (0x%x)\n",
|
||||
i, host_irq_stat);
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&host->lock);
|
||||
|
||||
|
@ -679,8 +668,7 @@ static void init_port(struct ata_port *ap)
|
|||
memset(pp->pkt, 0, sizeof(struct inic_pkt));
|
||||
memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
|
||||
|
||||
/* setup PRD and CPB lookup table addresses */
|
||||
writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
|
||||
/* setup CPB lookup table addresses */
|
||||
writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
|
||||
}
|
||||
|
||||
|
@ -694,7 +682,6 @@ static int inic_port_start(struct ata_port *ap)
|
|||
{
|
||||
struct device *dev = ap->host->dev;
|
||||
struct inic_port_priv *pp;
|
||||
int rc;
|
||||
|
||||
/* alloc and initialize private data */
|
||||
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
|
||||
|
@ -703,10 +690,6 @@ static int inic_port_start(struct ata_port *ap)
|
|||
ap->private_data = pp;
|
||||
|
||||
/* Alloc resources */
|
||||
rc = ata_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
|
||||
&pp->pkt_dma, GFP_KERNEL);
|
||||
if (!pp->pkt)
|
||||
|
|
|
@ -686,16 +686,27 @@ static struct ata_port_operations mv5_ops = {
|
|||
};
|
||||
|
||||
static struct ata_port_operations mv6_ops = {
|
||||
.inherits = &mv5_ops,
|
||||
.dev_config = mv6_dev_config,
|
||||
.scr_read = mv_scr_read,
|
||||
.scr_write = mv_scr_write,
|
||||
.inherits = &ata_bmdma_port_ops,
|
||||
|
||||
.lost_interrupt = ATA_OP_NULL,
|
||||
|
||||
.qc_defer = mv_qc_defer,
|
||||
.qc_prep = mv_qc_prep,
|
||||
.qc_issue = mv_qc_issue,
|
||||
|
||||
.dev_config = mv6_dev_config,
|
||||
|
||||
.freeze = mv_eh_freeze,
|
||||
.thaw = mv_eh_thaw,
|
||||
.hardreset = mv_hardreset,
|
||||
.softreset = mv_softreset,
|
||||
.pmp_hardreset = mv_pmp_hardreset,
|
||||
.pmp_softreset = mv_softreset,
|
||||
.softreset = mv_softreset,
|
||||
.error_handler = mv_pmp_error_handler,
|
||||
|
||||
.scr_read = mv_scr_read,
|
||||
.scr_write = mv_scr_write,
|
||||
|
||||
.sff_check_status = mv_sff_check_status,
|
||||
.sff_irq_clear = mv_sff_irq_clear,
|
||||
.check_atapi_dma = mv_check_atapi_dma,
|
||||
|
@ -703,6 +714,9 @@ static struct ata_port_operations mv6_ops = {
|
|||
.bmdma_start = mv_bmdma_start,
|
||||
.bmdma_stop = mv_bmdma_stop,
|
||||
.bmdma_status = mv_bmdma_status,
|
||||
|
||||
.port_start = mv_port_start,
|
||||
.port_stop = mv_port_stop,
|
||||
};
|
||||
|
||||
static struct ata_port_operations mv_iie_ops = {
|
||||
|
@ -2248,7 +2262,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
|
|||
}
|
||||
|
||||
if (qc->tf.flags & ATA_TFLAG_POLLING)
|
||||
ata_pio_queue_task(ap, qc, 0);
|
||||
ata_sff_queue_pio_task(ap, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2344,7 +2358,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
|
|||
if (IS_GEN_II(hpriv))
|
||||
return mv_qc_issue_fis(qc);
|
||||
}
|
||||
return ata_sff_qc_issue(qc);
|
||||
return ata_bmdma_qc_issue(qc);
|
||||
}
|
||||
|
||||
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
|
||||
|
@ -2355,13 +2369,9 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
|
|||
if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
|
||||
return NULL;
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc) {
|
||||
if (qc->tf.flags & ATA_TFLAG_POLLING)
|
||||
qc = NULL;
|
||||
else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
|
||||
qc = NULL;
|
||||
}
|
||||
return qc;
|
||||
if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
|
||||
return qc;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void mv_pmp_error_handler(struct ata_port *ap)
|
||||
|
@ -2546,9 +2556,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
|
|||
char *when = "idle";
|
||||
|
||||
ata_ehi_clear_desc(ehi);
|
||||
if (ap->flags & ATA_FLAG_DISABLED) {
|
||||
when = "disabled";
|
||||
} else if (edma_was_enabled) {
|
||||
if (edma_was_enabled) {
|
||||
when = "EDMA enabled";
|
||||
} else {
|
||||
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
|
@ -2782,10 +2790,6 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
|
|||
struct mv_port_priv *pp;
|
||||
int edma_was_enabled;
|
||||
|
||||
if (ap->flags & ATA_FLAG_DISABLED) {
|
||||
mv_unexpected_intr(ap, 0);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Grab a snapshot of the EDMA_EN flag setting,
|
||||
* so that we have a consistent view for this port,
|
||||
|
@ -3656,9 +3660,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
|
|||
/* special case: control/altstatus doesn't have ATA_REG_ address */
|
||||
port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
|
||||
|
||||
/* unused: */
|
||||
port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
|
||||
|
||||
/* Clear any currently outstanding port interrupt conditions */
|
||||
serr = port_mmio + mv_scr_offset(SCR_ERROR);
|
||||
writelfl(readl(serr), serr);
|
||||
|
|
|
@ -272,7 +272,7 @@ enum ncq_saw_flag_list {
|
|||
};
|
||||
|
||||
struct nv_swncq_port_priv {
|
||||
struct ata_prd *prd; /* our SG list */
|
||||
struct ata_bmdma_prd *prd; /* our SG list */
|
||||
dma_addr_t prd_dma; /* and its DMA mapping */
|
||||
void __iomem *sactive_block;
|
||||
void __iomem *irq_block;
|
||||
|
@ -933,107 +933,108 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
|
|||
|
||||
for (i = 0; i < host->n_ports; i++) {
|
||||
struct ata_port *ap = host->ports[i];
|
||||
struct nv_adma_port_priv *pp = ap->private_data;
|
||||
void __iomem *mmio = pp->ctl_block;
|
||||
u16 status;
|
||||
u32 gen_ctl;
|
||||
u32 notifier, notifier_error;
|
||||
|
||||
notifier_clears[i] = 0;
|
||||
|
||||
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
struct nv_adma_port_priv *pp = ap->private_data;
|
||||
void __iomem *mmio = pp->ctl_block;
|
||||
u16 status;
|
||||
u32 gen_ctl;
|
||||
u32 notifier, notifier_error;
|
||||
/* if ADMA is disabled, use standard ata interrupt handler */
|
||||
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
|
||||
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
|
||||
>> (NV_INT_PORT_SHIFT * i);
|
||||
handled += nv_host_intr(ap, irq_stat);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* if ADMA is disabled, use standard ata interrupt handler */
|
||||
if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
|
||||
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
|
||||
>> (NV_INT_PORT_SHIFT * i);
|
||||
handled += nv_host_intr(ap, irq_stat);
|
||||
continue;
|
||||
}
|
||||
/* if in ATA register mode, check for standard interrupts */
|
||||
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
|
||||
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
|
||||
>> (NV_INT_PORT_SHIFT * i);
|
||||
if (ata_tag_valid(ap->link.active_tag))
|
||||
/** NV_INT_DEV indication seems unreliable
|
||||
at times at least in ADMA mode. Force it
|
||||
on always when a command is active, to
|
||||
prevent losing interrupts. */
|
||||
irq_stat |= NV_INT_DEV;
|
||||
handled += nv_host_intr(ap, irq_stat);
|
||||
}
|
||||
|
||||
/* if in ATA register mode, check for standard interrupts */
|
||||
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
|
||||
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
|
||||
>> (NV_INT_PORT_SHIFT * i);
|
||||
notifier = readl(mmio + NV_ADMA_NOTIFIER);
|
||||
notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
|
||||
notifier_clears[i] = notifier | notifier_error;
|
||||
|
||||
gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
|
||||
|
||||
if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
|
||||
!notifier_error)
|
||||
/* Nothing to do */
|
||||
continue;
|
||||
|
||||
status = readw(mmio + NV_ADMA_STAT);
|
||||
|
||||
/*
|
||||
* Clear status. Ensure the controller sees the
|
||||
* clearing before we start looking at any of the CPB
|
||||
* statuses, so that any CPB completions after this
|
||||
* point in the handler will raise another interrupt.
|
||||
*/
|
||||
writew(status, mmio + NV_ADMA_STAT);
|
||||
readw(mmio + NV_ADMA_STAT); /* flush posted write */
|
||||
rmb();
|
||||
|
||||
handled++; /* irq handled if we got here */
|
||||
|
||||
/* freeze if hotplugged or controller error */
|
||||
if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
|
||||
NV_ADMA_STAT_HOTUNPLUG |
|
||||
NV_ADMA_STAT_TIMEOUT |
|
||||
NV_ADMA_STAT_SERROR))) {
|
||||
struct ata_eh_info *ehi = &ap->link.eh_info;
|
||||
|
||||
ata_ehi_clear_desc(ehi);
|
||||
__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
|
||||
if (status & NV_ADMA_STAT_TIMEOUT) {
|
||||
ehi->err_mask |= AC_ERR_SYSTEM;
|
||||
ata_ehi_push_desc(ehi, "timeout");
|
||||
} else if (status & NV_ADMA_STAT_HOTPLUG) {
|
||||
ata_ehi_hotplugged(ehi);
|
||||
ata_ehi_push_desc(ehi, "hotplug");
|
||||
} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
|
||||
ata_ehi_hotplugged(ehi);
|
||||
ata_ehi_push_desc(ehi, "hot unplug");
|
||||
} else if (status & NV_ADMA_STAT_SERROR) {
|
||||
/* let EH analyze SError and figure out cause */
|
||||
ata_ehi_push_desc(ehi, "SError");
|
||||
} else
|
||||
ata_ehi_push_desc(ehi, "unknown");
|
||||
ata_port_freeze(ap);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (status & (NV_ADMA_STAT_DONE |
|
||||
NV_ADMA_STAT_CPBERR |
|
||||
NV_ADMA_STAT_CMD_COMPLETE)) {
|
||||
u32 check_commands = notifier_clears[i];
|
||||
int pos, error = 0;
|
||||
|
||||
if (status & NV_ADMA_STAT_CPBERR) {
|
||||
/* check all active commands */
|
||||
if (ata_tag_valid(ap->link.active_tag))
|
||||
/** NV_INT_DEV indication seems unreliable at times
|
||||
at least in ADMA mode. Force it on always when a
|
||||
command is active, to prevent losing interrupts. */
|
||||
irq_stat |= NV_INT_DEV;
|
||||
handled += nv_host_intr(ap, irq_stat);
|
||||
check_commands = 1 <<
|
||||
ap->link.active_tag;
|
||||
else
|
||||
check_commands = ap->link.sactive;
|
||||
}
|
||||
|
||||
notifier = readl(mmio + NV_ADMA_NOTIFIER);
|
||||
notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
|
||||
notifier_clears[i] = notifier | notifier_error;
|
||||
|
||||
gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
|
||||
|
||||
if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
|
||||
!notifier_error)
|
||||
/* Nothing to do */
|
||||
continue;
|
||||
|
||||
status = readw(mmio + NV_ADMA_STAT);
|
||||
|
||||
/* Clear status. Ensure the controller sees the clearing before we start
|
||||
looking at any of the CPB statuses, so that any CPB completions after
|
||||
this point in the handler will raise another interrupt. */
|
||||
writew(status, mmio + NV_ADMA_STAT);
|
||||
readw(mmio + NV_ADMA_STAT); /* flush posted write */
|
||||
rmb();
|
||||
|
||||
handled++; /* irq handled if we got here */
|
||||
|
||||
/* freeze if hotplugged or controller error */
|
||||
if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
|
||||
NV_ADMA_STAT_HOTUNPLUG |
|
||||
NV_ADMA_STAT_TIMEOUT |
|
||||
NV_ADMA_STAT_SERROR))) {
|
||||
struct ata_eh_info *ehi = &ap->link.eh_info;
|
||||
|
||||
ata_ehi_clear_desc(ehi);
|
||||
__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
|
||||
if (status & NV_ADMA_STAT_TIMEOUT) {
|
||||
ehi->err_mask |= AC_ERR_SYSTEM;
|
||||
ata_ehi_push_desc(ehi, "timeout");
|
||||
} else if (status & NV_ADMA_STAT_HOTPLUG) {
|
||||
ata_ehi_hotplugged(ehi);
|
||||
ata_ehi_push_desc(ehi, "hotplug");
|
||||
} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
|
||||
ata_ehi_hotplugged(ehi);
|
||||
ata_ehi_push_desc(ehi, "hot unplug");
|
||||
} else if (status & NV_ADMA_STAT_SERROR) {
|
||||
/* let libata analyze SError and figure out the cause */
|
||||
ata_ehi_push_desc(ehi, "SError");
|
||||
} else
|
||||
ata_ehi_push_desc(ehi, "unknown");
|
||||
ata_port_freeze(ap);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (status & (NV_ADMA_STAT_DONE |
|
||||
NV_ADMA_STAT_CPBERR |
|
||||
NV_ADMA_STAT_CMD_COMPLETE)) {
|
||||
u32 check_commands = notifier_clears[i];
|
||||
int pos, error = 0;
|
||||
|
||||
if (status & NV_ADMA_STAT_CPBERR) {
|
||||
/* Check all active commands */
|
||||
if (ata_tag_valid(ap->link.active_tag))
|
||||
check_commands = 1 <<
|
||||
ap->link.active_tag;
|
||||
else
|
||||
check_commands = ap->
|
||||
link.sactive;
|
||||
}
|
||||
|
||||
/** Check CPBs for completed commands */
|
||||
while ((pos = ffs(check_commands)) && !error) {
|
||||
pos--;
|
||||
error = nv_adma_check_cpb(ap, pos,
|
||||
/* check CPBs for completed commands */
|
||||
while ((pos = ffs(check_commands)) && !error) {
|
||||
pos--;
|
||||
error = nv_adma_check_cpb(ap, pos,
|
||||
notifier_error & (1 << pos));
|
||||
check_commands &= ~(1 << pos);
|
||||
}
|
||||
check_commands &= ~(1 << pos);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
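The check_commands handling in the interrupt hunk above walks a tag bitmask lowest bit first with ffs(). A standalone illustration of that loop shape (userspace, ffs() from <strings.h>; in the driver the bits come from the notifier registers):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int check_commands = 0x29;	/* tags 0, 3 and 5 pending */
	int pos;

	while ((pos = ffs(check_commands))) {
		pos--;					/* ffs() is 1-based */
		printf("completing tag %d\n", pos);
		check_commands &= ~(1U << pos);		/* clear the handled bit */
	}
	return 0;
}
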
@ -1130,7 +1131,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
|
|||
struct nv_adma_port_priv *pp = qc->ap->private_data;
|
||||
|
||||
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
|
||||
ata_sff_post_internal_cmd(qc);
|
||||
ata_bmdma_post_internal_cmd(qc);
|
||||
}
|
||||
|
||||
static int nv_adma_port_start(struct ata_port *ap)
|
||||
|
@ -1155,7 +1156,8 @@ static int nv_adma_port_start(struct ata_port *ap)
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = ata_port_start(ap);
|
||||
/* we might fallback to bmdma, allocate bmdma resources */
|
||||
rc = ata_bmdma_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -1407,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
|
|||
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
|
||||
(qc->flags & ATA_QCFLAG_DMAMAP));
|
||||
nv_adma_register_mode(qc->ap);
|
||||
ata_sff_qc_prep(qc);
|
||||
ata_bmdma_qc_prep(qc);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1466,7 +1468,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
|
|||
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
|
||||
(qc->flags & ATA_QCFLAG_DMAMAP));
|
||||
nv_adma_register_mode(qc->ap);
|
||||
return ata_sff_qc_issue(qc);
|
||||
return ata_bmdma_qc_issue(qc);
|
||||
} else
|
||||
nv_adma_mode(qc->ap);
|
||||
|
||||
|
@ -1498,22 +1500,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
|
|||
spin_lock_irqsave(&host->lock, flags);
|
||||
|
||||
for (i = 0; i < host->n_ports; i++) {
|
||||
struct ata_port *ap;
|
||||
struct ata_port *ap = host->ports[i];
|
||||
struct ata_queued_cmd *qc;
|
||||
|
||||
ap = host->ports[i];
|
||||
if (ap &&
|
||||
!(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
struct ata_queued_cmd *qc;
|
||||
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
|
||||
handled += ata_sff_host_intr(ap, qc);
|
||||
else
|
||||
// No request pending? Clear interrupt status
|
||||
// anyway, in case there's one pending.
|
||||
ap->ops->sff_check_status(ap);
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
|
||||
handled += ata_sff_host_intr(ap, qc);
|
||||
} else {
|
||||
/*
|
||||
* No request pending? Clear interrupt status
|
||||
* anyway, in case there's one pending.
|
||||
*/
|
||||
ap->ops->sff_check_status(ap);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&host->lock, flags);
|
||||
|
@ -1526,11 +1525,7 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
|
|||
int i, handled = 0;
|
||||
|
||||
for (i = 0; i < host->n_ports; i++) {
|
||||
struct ata_port *ap = host->ports[i];
|
||||
|
||||
if (ap && !(ap->flags & ATA_FLAG_DISABLED))
|
||||
handled += nv_host_intr(ap, irq_stat);
|
||||
|
||||
handled += nv_host_intr(host->ports[i], irq_stat);
|
||||
irq_stat >>= NV_INT_PORT_SHIFT;
|
||||
}
|
||||
|
||||
|
@ -1744,7 +1739,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
|
|||
readw(mmio + NV_ADMA_CTL); /* flush posted write */
|
||||
}
|
||||
|
||||
ata_sff_error_handler(ap);
|
||||
ata_bmdma_error_handler(ap);
|
||||
}
|
||||
|
||||
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
|
||||
|
@ -1870,7 +1865,7 @@ static void nv_swncq_error_handler(struct ata_port *ap)
|
|||
ehc->i.action |= ATA_EH_RESET;
|
||||
}
|
||||
|
||||
ata_sff_error_handler(ap);
|
||||
ata_bmdma_error_handler(ap);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
@ -1991,7 +1986,8 @@ static int nv_swncq_port_start(struct ata_port *ap)
|
|||
struct nv_swncq_port_priv *pp;
|
||||
int rc;
|
||||
|
||||
rc = ata_port_start(ap);
|
||||
/* we might fallback to bmdma, allocate bmdma resources */
|
||||
rc = ata_bmdma_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -2016,7 +2012,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
|
|||
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
|
||||
{
|
||||
if (qc->tf.protocol != ATA_PROT_NCQ) {
|
||||
ata_sff_qc_prep(qc);
|
||||
ata_bmdma_qc_prep(qc);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2031,7 +2027,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
|
|||
struct ata_port *ap = qc->ap;
|
||||
struct scatterlist *sg;
|
||||
struct nv_swncq_port_priv *pp = ap->private_data;
|
||||
struct ata_prd *prd;
|
||||
struct ata_bmdma_prd *prd;
|
||||
unsigned int si, idx;
|
||||
|
||||
prd = pp->prd + ATA_MAX_PRD * qc->tag;
|
||||
|
@ -2092,7 +2088,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
|
|||
struct nv_swncq_port_priv *pp = ap->private_data;
|
||||
|
||||
if (qc->tf.protocol != ATA_PROT_NCQ)
|
||||
return ata_sff_qc_issue(qc);
|
||||
return ata_bmdma_qc_issue(qc);
|
||||
|
||||
DPRINTK("Enter\n");
|
||||
|
||||
|
@ -2380,16 +2376,14 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
|
|||
for (i = 0; i < host->n_ports; i++) {
|
||||
struct ata_port *ap = host->ports[i];
|
||||
|
||||
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
if (ap->link.sactive) {
|
||||
nv_swncq_host_interrupt(ap, (u16)irq_stat);
|
||||
handled = 1;
|
||||
} else {
|
||||
if (irq_stat) /* reserve Hotplug */
|
||||
nv_swncq_irq_clear(ap, 0xfff0);
|
||||
if (ap->link.sactive) {
|
||||
nv_swncq_host_interrupt(ap, (u16)irq_stat);
|
||||
handled = 1;
|
||||
} else {
|
||||
if (irq_stat) /* reserve Hotplug */
|
||||
nv_swncq_irq_clear(ap, 0xfff0);
|
||||
|
||||
handled += nv_host_intr(ap, (u8)irq_stat);
|
||||
}
|
||||
handled += nv_host_intr(ap, (u8)irq_stat);
|
||||
}
|
||||
irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
|
||||
}
|
||||
|
@ -2479,8 +2473,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
}
|
||||
|
||||
pci_set_master(pdev);
|
||||
return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
|
||||
IRQF_SHARED, ipriv->sht);
|
||||
return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
|
|
@ -333,7 +333,8 @@ static int pdc_common_port_start(struct ata_port *ap)
|
|||
struct pdc_port_priv *pp;
|
||||
int rc;
|
||||
|
||||
rc = ata_port_start(ap);
|
||||
/* we use the same prd table as bmdma, allocate it */
|
||||
rc = ata_bmdma_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -499,7 +500,7 @@ static int pdc_sata_scr_write(struct ata_link *link,
|
|||
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
dma_addr_t sg_table = ap->prd_dma;
|
||||
dma_addr_t sg_table = ap->bmdma_prd_dma;
|
||||
unsigned int cdb_len = qc->dev->cdb_len;
|
||||
u8 *cdb = qc->cdb;
|
||||
struct pdc_port_priv *pp = ap->private_data;
|
||||
|
@ -587,6 +588,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
|
|||
static void pdc_fill_sg(struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
struct ata_bmdma_prd *prd = ap->bmdma_prd;
|
||||
struct scatterlist *sg;
|
||||
const u32 SG_COUNT_ASIC_BUG = 41*4;
|
||||
unsigned int si, idx;
|
||||
|
@ -613,8 +615,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
|
|||
if ((offset + sg_len) > 0x10000)
|
||||
len = 0x10000 - offset;
|
||||
|
||||
ap->prd[idx].addr = cpu_to_le32(addr);
|
||||
ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
|
||||
prd[idx].addr = cpu_to_le32(addr);
|
||||
prd[idx].flags_len = cpu_to_le32(len & 0xffff);
|
||||
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
|
||||
|
||||
idx++;
|
||||
|
@ -623,27 +625,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
|
|||
}
|
||||
}
|
||||
|
||||
len = le32_to_cpu(ap->prd[idx - 1].flags_len);
|
||||
len = le32_to_cpu(prd[idx - 1].flags_len);
|
||||
|
||||
if (len > SG_COUNT_ASIC_BUG) {
|
||||
u32 addr;
|
||||
|
||||
VPRINTK("Splitting last PRD.\n");
|
||||
|
||||
addr = le32_to_cpu(ap->prd[idx - 1].addr);
|
||||
ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
|
||||
addr = le32_to_cpu(prd[idx - 1].addr);
|
||||
prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
|
||||
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
|
||||
|
||||
addr = addr + len - SG_COUNT_ASIC_BUG;
|
||||
len = SG_COUNT_ASIC_BUG;
|
||||
ap->prd[idx].addr = cpu_to_le32(addr);
|
||||
ap->prd[idx].flags_len = cpu_to_le32(len);
|
||||
prd[idx].addr = cpu_to_le32(addr);
|
||||
prd[idx].flags_len = cpu_to_le32(len);
|
||||
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
|
||||
|
||||
idx++;
|
||||
}
|
||||
|
||||
ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
|
||||
prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
|
||||
}
|
||||
|
||||
static void pdc_qc_prep(struct ata_queued_cmd *qc)
|
||||
|
@ -658,7 +660,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
|
|||
pdc_fill_sg(qc);
|
||||
/*FALLTHROUGH*/
|
||||
case ATA_PROT_NODATA:
|
||||
i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
|
||||
i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
|
||||
qc->dev->devno, pp->pkt);
|
||||
if (qc->tf.flags & ATA_TFLAG_LBA48)
|
||||
i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
|
||||
|
@ -838,7 +840,7 @@ static void pdc_error_handler(struct ata_port *ap)
|
|||
if (!(ap->pflags & ATA_PFLAG_FROZEN))
|
||||
pdc_reset_port(ap);
|
||||
|
||||
ata_std_error_handler(ap);
|
||||
ata_sff_error_handler(ap);
|
||||
}
|
||||
|
||||
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
|
||||
|
@ -984,8 +986,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
|
|||
/* check for a plug or unplug event */
|
||||
ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
|
||||
tmp = hotplug_status & (0x11 << ata_no);
|
||||
if (tmp && ap &&
|
||||
!(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
if (tmp) {
|
||||
struct ata_eh_info *ehi = &ap->link.eh_info;
|
||||
ata_ehi_clear_desc(ehi);
|
||||
ata_ehi_hotplugged(ehi);
|
||||
|
@ -997,8 +998,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
|
|||
|
||||
/* check for a packet interrupt */
|
||||
tmp = mask & (1 << (i + 1));
|
||||
if (tmp && ap &&
|
||||
!(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
if (tmp) {
|
||||
struct ata_queued_cmd *qc;
|
||||
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
|
|
|
@ -147,7 +147,6 @@ static struct ata_port_operations qs_ata_ops = {
|
|||
.prereset = qs_prereset,
|
||||
.softreset = ATA_OP_NULL,
|
||||
.error_handler = qs_error_handler,
|
||||
.post_internal_cmd = ATA_OP_NULL,
|
||||
.lost_interrupt = ATA_OP_NULL,
|
||||
|
||||
.scr_read = qs_scr_read,
|
||||
|
@ -255,7 +254,7 @@ static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
|
|||
static void qs_error_handler(struct ata_port *ap)
|
||||
{
|
||||
qs_enter_reg_mode(ap);
|
||||
ata_std_error_handler(ap);
|
||||
ata_sff_error_handler(ap);
|
||||
}
|
||||
|
||||
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
|
||||
|
@ -304,10 +303,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
|
|||
VPRINTK("ENTER\n");
|
||||
|
||||
qs_enter_reg_mode(qc->ap);
|
||||
if (qc->tf.protocol != ATA_PROT_DMA) {
|
||||
ata_sff_qc_prep(qc);
|
||||
if (qc->tf.protocol != ATA_PROT_DMA)
|
||||
return;
|
||||
}
|
||||
|
||||
nelem = qs_fill_sg(qc);
|
||||
|
||||
|
@ -404,26 +401,24 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
|
|||
u8 sHST = sff1 & 0x3f; /* host status */
|
||||
unsigned int port_no = (sff1 >> 8) & 0x03;
|
||||
struct ata_port *ap = host->ports[port_no];
|
||||
struct qs_port_priv *pp = ap->private_data;
|
||||
struct ata_queued_cmd *qc;
|
||||
|
||||
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
|
||||
sff1, sff0, port_no, sHST, sDST);
|
||||
handled = 1;
|
||||
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
struct ata_queued_cmd *qc;
|
||||
struct qs_port_priv *pp = ap->private_data;
|
||||
if (!pp || pp->state != qs_state_pkt)
|
||||
continue;
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
|
||||
switch (sHST) {
|
||||
case 0: /* successful CPB */
|
||||
case 3: /* device error */
|
||||
qs_enter_reg_mode(qc->ap);
|
||||
qs_do_or_die(qc, sDST);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
if (!pp || pp->state != qs_state_pkt)
|
||||
continue;
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
|
||||
switch (sHST) {
|
||||
case 0: /* successful CPB */
|
||||
case 3: /* device error */
|
||||
qs_enter_reg_mode(qc->ap);
|
||||
qs_do_or_die(qc, sDST);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -436,33 +431,30 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
|
|||
unsigned int handled = 0, port_no;
|
||||
|
||||
for (port_no = 0; port_no < host->n_ports; ++port_no) {
|
||||
struct ata_port *ap;
|
||||
ap = host->ports[port_no];
|
||||
if (ap &&
|
||||
!(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
struct ata_queued_cmd *qc;
|
||||
struct qs_port_priv *pp;
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) {
|
||||
/*
|
||||
* The qstor hardware generates spurious
|
||||
* interrupts from time to time when switching
|
||||
* in and out of packet mode.
|
||||
* There's no obvious way to know if we're
|
||||
* here now due to that, so just ack the irq
|
||||
* and pretend we knew it was ours.. (ugh).
|
||||
* This does not affect packet mode.
|
||||
*/
|
||||
ata_sff_check_status(ap);
|
||||
handled = 1;
|
||||
continue;
|
||||
}
|
||||
pp = ap->private_data;
|
||||
if (!pp || pp->state != qs_state_mmio)
|
||||
continue;
|
||||
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
|
||||
handled |= ata_sff_host_intr(ap, qc);
|
||||
struct ata_port *ap = host->ports[port_no];
|
||||
struct qs_port_priv *pp = ap->private_data;
|
||||
struct ata_queued_cmd *qc;
|
||||
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
if (!qc) {
|
||||
/*
|
||||
* The qstor hardware generates spurious
|
||||
* interrupts from time to time when switching
|
||||
* in and out of packet mode. There's no
|
||||
* obvious way to know if we're here now due
|
||||
* to that, so just ack the irq and pretend we
|
||||
* knew it was ours.. (ugh). This does not
|
||||
* affect packet mode.
|
||||
*/
|
||||
ata_sff_check_status(ap);
|
||||
handled = 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!pp || pp->state != qs_state_mmio)
|
||||
continue;
|
||||
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
|
||||
handled |= ata_sff_host_intr(ap, qc);
|
||||
}
|
||||
return handled;
|
||||
}
|
||||
|
@ -509,11 +501,7 @@ static int qs_port_start(struct ata_port *ap)
|
|||
void __iomem *mmio_base = qs_mmio_base(ap->host);
|
||||
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
|
||||
u64 addr;
|
||||
int rc;
|
||||
|
||||
rc = ata_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
|
||||
if (!pp)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -284,7 +284,7 @@ static void sil_bmdma_setup(struct ata_queued_cmd *qc)
|
|||
void __iomem *bmdma = ap->ioaddr.bmdma_addr;
|
||||
|
||||
/* load PRD table addr. */
|
||||
iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
|
||||
iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
|
||||
|
||||
/* issue r/w command */
|
||||
ap->ops->sff_exec_command(ap, &qc->tf);
|
||||
|
@ -311,10 +311,10 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
|
|||
{
|
||||
struct scatterlist *sg;
|
||||
struct ata_port *ap = qc->ap;
|
||||
struct ata_prd *prd, *last_prd = NULL;
|
||||
struct ata_bmdma_prd *prd, *last_prd = NULL;
|
||||
unsigned int si;
|
||||
|
||||
prd = &ap->prd[0];
|
||||
prd = &ap->bmdma_prd[0];
|
||||
for_each_sg(qc->sg, sg, qc->n_elem, si) {
|
||||
/* Note h/w doesn't support 64-bit, so we unconditionally
|
||||
* truncate dma_addr_t to u32.
|
||||
|
@ -532,9 +532,6 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
		struct ata_port *ap = host->ports[i];
		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);

		if (unlikely(ap->flags & ATA_FLAG_DISABLED))
			continue;

		/* turn off SATA_IRQ if not supported */
		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
			bmdma2 &= ~SIL_DMA_SATA_IRQ;
@ -1160,13 +1160,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)

	for (i = 0; i < host->n_ports; i++)
		if (status & (1 << i)) {
			struct ata_port *ap = host->ports[i];
			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
				sil24_host_intr(ap);
				handled++;
			} else
				printk(KERN_ERR DRV_NAME
				       ": interrupt from disabled port %d\n", i);
			sil24_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);
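With ATA_FLAG_DISABLED gone, shared interrupt handlers like the sil24 one above reduce to "dispatch to every port whose status bit is set". A sketch of that shape for a hypothetical controller; foo_host_intr(), the BAR index and the IRQ status register offset are assumptions:

static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio = host->iomap[0];		/* assumed BAR */
	u32 status = readl(mmio + 0x10);		/* assumed IRQ status reg */
	unsigned int handled = 0;
	int i;

	spin_lock(&host->lock);
	for (i = 0; i < host->n_ports; i++)
		if (status & (1 << i)) {
			foo_host_intr(host->ports[i]);	/* hypothetical */
			handled++;
		}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
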
@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
|
|||
|
||||
/* load PRD table addr. */
|
||||
mb(); /* make sure PRD table writes are visible to controller */
|
||||
writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
|
||||
writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
|
||||
|
||||
/* specify data direction, triple-check start bit is clear */
|
||||
dmactl = readb(mmio + ATA_DMA_CMD);
|
||||
|
|
|
@ -302,11 +302,6 @@ static int pdc_port_start(struct ata_port *ap)
|
|||
{
|
||||
struct device *dev = ap->host->dev;
|
||||
struct pdc_port_priv *pp;
|
||||
int rc;
|
||||
|
||||
rc = ata_port_start(ap);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
|
||||
if (!pp)
|
||||
|
@ -840,8 +835,7 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
|
|||
ap = host->ports[port_no];
|
||||
tmp = mask & (1 << i);
|
||||
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
|
||||
if (tmp && ap &&
|
||||
!(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
if (tmp && ap) {
|
||||
struct ata_queued_cmd *qc;
|
||||
|
||||
qc = ata_qc_from_tag(ap, ap->link.active_tag);
|
||||
|
@ -927,7 +921,7 @@ static void pdc_error_handler(struct ata_port *ap)
|
|||
if (!(ap->pflags & ATA_PFLAG_FROZEN))
|
||||
pdc_reset_port(ap);
|
||||
|
||||
ata_std_error_handler(ap);
|
||||
ata_sff_error_handler(ap);
|
||||
}
|
||||
|
||||
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
|
||||
|
|
|
@ -181,9 +181,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (rc)
		return rc;

	rc = ata_pci_bmdma_init(host);
	if (rc)
		return rc;
	ata_pci_bmdma_init(host);

	iomap = host->iomap;
@ -284,14 +284,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
|
|||
for (i = 0; i < host->n_ports; i++) {
|
||||
u8 port_status = (status >> (8 * i)) & 0xff;
|
||||
if (port_status) {
|
||||
struct ata_port *ap = host->ports[i];
|
||||
|
||||
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
|
||||
vsc_port_intr(port_status, ap);
|
||||
handled++;
|
||||
} else
|
||||
dev_printk(KERN_ERR, host->dev,
|
||||
"interrupt from disabled port %d\n", i);
|
||||
vsc_port_intr(port_status, host->ports[i]);
|
||||
handled++;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -4295,7 +4295,7 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
|
|||
res = (struct ipr_resource_entry *) sdev->hostdata;
|
||||
if (res) {
|
||||
if (res->sata_port)
|
||||
ata_port_disable(res->sata_port->ap);
|
||||
res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
|
||||
sdev->hostdata = NULL;
|
||||
res->sdev = NULL;
|
||||
res->sata_port = NULL;
|
||||
|
@ -5751,13 +5751,13 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
|
|||
rc = ipr_device_reset(ioa_cfg, res);
|
||||
|
||||
if (rc) {
|
||||
ata_port_disable(ap);
|
||||
ap->link.device[0].class = ATA_DEV_NONE;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ap->link.device[0].class = res->ata_class;
|
||||
if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
|
||||
ata_port_disable(ap);
|
||||
ap->link.device[0].class = ATA_DEV_NONE;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
|
||||
|
|
|
@ -818,7 +818,7 @@ void sas_slave_destroy(struct scsi_device *scsi_dev)
|
|||
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
|
||||
|
||||
if (dev_is_sata(dev))
|
||||
ata_port_disable(dev->sata_dev.ap);
|
||||
dev->sata_dev.ap->link.device[0].class = ATA_DEV_NONE;
|
||||
}
|
||||
|
||||
int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth,
|
||||
|
|
|
@ -0,0 +1,29 @@
/*
 * AHCI SATA platform driver
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *   Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2010 MontaVista Software, LLC.
 *   Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 */

#ifndef _AHCI_PLATFORM_H
#define _AHCI_PLATFORM_H

struct device;
struct ata_port_info;

struct ahci_platform_data {
	int (*init)(struct device *dev);
	void (*exit)(struct device *dev);
	const struct ata_port_info *ata_port_info;
	unsigned int force_port_map;
	unsigned int mask_port_map;
};

#endif /* _AHCI_PLATFORM_H */
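The new header above is the whole contract between a board file and the platform AHCI driver: optional init/exit callbacks plus port-map overrides. A sketch of how board code might register such a device; the MMIO window, IRQ number, header path and the "ahci" device name are assumptions for illustration, and only the ahci_platform_data fields come from the header itself:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/ahci_platform.h>	/* assumed install path of the header above */

static int board_ahci_init(struct device *dev)
{
	/* enable clocks / bring up the SATA PHY here if the SoC needs it */
	return 0;
}

static struct ahci_platform_data board_ahci_pdata = {
	.init = board_ahci_init,
};

static struct resource board_ahci_resources[] = {
	{
		.start	= 0xffe08000,	/* assumed controller MMIO base */
		.end	= 0xffe08fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 39,		/* assumed interrupt line */
		.end	= 39,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_ahci_device = {
	.name		= "ahci",	/* assumed platform driver name */
	.id		= -1,
	.resource	= board_ahci_resources,
	.num_resources	= ARRAY_SIZE(board_ahci_resources),
	.dev		= {
		.platform_data	= &board_ahci_pdata,
	},
};
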
@ -467,7 +467,7 @@ enum ata_ioctls {

/* core structures */

struct ata_prd {
struct ata_bmdma_prd {
	__le32 addr;
	__le32 flags_len;
};
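The renamed struct ata_bmdma_prd keeps the same two little-endian fields, and the drivers earlier in this diff (sata_sil, sata_promise) fill it the same way as before. A minimal sketch of writing one entry; the helper name and parameters are illustrative:

static void fill_one_prd(struct ata_bmdma_prd *prd, u32 addr, u32 len,
			 bool last)
{
	prd->addr = cpu_to_le32(addr);
	prd->flags_len = cpu_to_le32(len & 0xffff);
	if (last)
		prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);	/* end of table */
}
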
@@ -202,12 +202,6 @@ enum {
 	ATA_FLAG_SW_ACTIVITY	= (1 << 22), /* driver supports sw activity
 					      * led */
 
-	/* The following flag belongs to ap->pflags but is kept in
-	 * ap->flags because it's referenced in many LLDs and will be
-	 * removed in not-too-distant future.
-	 */
-	ATA_FLAG_DISABLED	= (1 << 23), /* port is disabled, ignore it */
-
 	/* bits 24:31 of ap->flags are reserved for LLD specific flags */
@@ -256,12 +250,13 @@ enum {
 	ATA_TMOUT_INTERNAL_QUICK = 5000,
 	ATA_TMOUT_MAX_PARK	= 30000,
 
-	/* FIXME: GoVault needs 2s but we can't afford that without
-	 * parallel probing.  800ms is enough for iVDR disk
-	 * HHD424020F7SV00.  Increase to 2secs when parallel probing
-	 * is in place.
+	/*
+	 * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms.  2s
+	 * is too much without parallel probing.  Use 2s if parallel
+	 * probing is available, 800ms otherwise.
 	 */
-	ATA_TMOUT_FF_WAIT	= 800,
+	ATA_TMOUT_FF_WAIT_LONG	= 2000,
+	ATA_TMOUT_FF_WAIT	= 800,
 
 	/* Spec mandates to wait for ">= 2ms" before checking status
 	 * after reset.  We wait 150ms, because that was the magic
@@ -721,15 +716,15 @@ struct ata_port {
 	unsigned int		print_id; /* user visible unique port ID */
 	unsigned int		port_no; /* 0 based port no. inside the host */
 
-	struct ata_prd		*prd;	 /* our SG list */
-	dma_addr_t		prd_dma; /* and its DMA mapping */
 #ifdef CONFIG_ATA_SFF
 	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
-#endif /* CONFIG_ATA_SFF */
-
 	u8			ctl;	/* cache of ATA control register */
 	u8			last_ctl;	/* Cache last written value */
+	struct delayed_work	sff_pio_task;
+	struct ata_bmdma_prd	*bmdma_prd;	/* BMDMA SG list */
+	dma_addr_t		bmdma_prd_dma;	/* and its DMA mapping */
+#endif /* CONFIG_ATA_SFF */
 
 	unsigned int		pio_mask;
 	unsigned int		mwdma_mask;
 	unsigned int		udma_mask;
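For illustration only, not part of this commit: a minimal sketch of how a BMDMA driver would touch the renamed PRD table through the new ap->bmdma_prd field. The helper name is hypothetical; in-tree drivers normally let ata_bmdma_qc_prep() (declared later in this diff) build the table for them.

#include <linux/libata.h>

/* Fill one PRD entry: addr is the DMA bus address of the segment and
 * the low 16 bits of flags_len carry the byte count (0 means 64K). */
static void foo_fill_one_prd(struct ata_port *ap, unsigned int idx,
			     u32 addr, u32 len)
{
	struct ata_bmdma_prd *prd = ap->bmdma_prd;

	prd[idx].addr = cpu_to_le32(addr);
	prd[idx].flags_len = cpu_to_le32(len & 0xffff);
}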
@@ -751,8 +746,6 @@ struct ata_port {
 	struct ata_host		*host;
 	struct device 		*dev;
 
-	void			*port_task_data;
-	struct delayed_work	port_task;
 	struct delayed_work	hotplug_task;
 	struct work_struct	scsi_rescan_task;
@@ -849,6 +842,7 @@ struct ata_port_operations {
 	 * SFF / taskfile oriented ops
 	 */
 	void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
+	void (*sff_set_devctl)(struct ata_port *ap, u8 ctl);
 	u8   (*sff_check_status)(struct ata_port *ap);
 	u8   (*sff_check_altstatus)(struct ata_port *ap);
 	void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf);
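For illustration only, not part of this commit: a minimal sketch of what a driver-supplied ->sff_set_devctl() hook could look like for a plain MMIO port. The driver prefix is hypothetical; the hook simply writes the Device Control value to the port's control block register.

#include <linux/io.h>
#include <linux/libata.h>

static void foo_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	/* ctl_addr points at the Device Control/Alternate Status register */
	iowrite8(ctl, ap->ioaddr.ctl_addr);
}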
@@ -857,16 +851,15 @@ struct ata_port_operations {
 			      const struct ata_taskfile *tf);
 	unsigned int (*sff_data_xfer)(struct ata_device *dev,
 			unsigned char *buf, unsigned int buflen, int rw);
-	u8   (*sff_irq_on)(struct ata_port *);
+	void (*sff_irq_on)(struct ata_port *);
 	bool (*sff_irq_check)(struct ata_port *);
 	void (*sff_irq_clear)(struct ata_port *);
+	void (*sff_drain_fifo)(struct ata_queued_cmd *qc);
 
 	void (*bmdma_setup)(struct ata_queued_cmd *qc);
 	void (*bmdma_start)(struct ata_queued_cmd *qc);
 	void (*bmdma_stop)(struct ata_queued_cmd *qc);
 	u8   (*bmdma_status)(struct ata_port *ap);
-
-	void (*drain_fifo)(struct ata_queued_cmd *qc);
 #endif /* CONFIG_ATA_SFF */
 
 	ssize_t (*em_show)(struct ata_port *ap, char *buf);
@@ -935,7 +928,6 @@ static inline int ata_port_is_dummy(struct ata_port *ap)
 	return ap->ops == &ata_dummy_port_ops;
 }
 
-extern void ata_port_probe(struct ata_port *);
 extern int sata_set_spd(struct ata_link *link);
 extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
 extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
@@ -950,7 +942,6 @@ extern int sata_link_hardreset(struct ata_link *link,
 extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
 			      unsigned long deadline);
 extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
-extern void ata_port_disable(struct ata_port *);
 extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
@@ -1006,7 +997,6 @@ extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
 extern int ata_xfer_mode2shift(unsigned long xfer_mode);
 extern const char *ata_mode_string(unsigned long xfer_mask);
 extern unsigned long ata_id_xfermask(const u16 *id);
-extern int ata_port_start(struct ata_port *ap);
 extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
@@ -1039,9 +1029,6 @@ extern int ata_cable_sata(struct ata_port *ap);
 extern int ata_cable_ignore(struct ata_port *ap);
 extern int ata_cable_unknown(struct ata_port *ap);
 
-extern void ata_pio_queue_task(struct ata_port *ap, void *data,
-			       unsigned long delay);
-
 /* Timing helpers */
 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
 extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
@@ -1443,7 +1430,11 @@ static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
 {
 	memset(tf, 0, sizeof(*tf));
 
+#ifdef CONFIG_ATA_SFF
 	tf->ctl = dev->link->ap->ctl;
+#else
+	tf->ctl = ATA_DEVCTL_OBS;
+#endif
 	if (dev->devno == 0)
 		tf->device = ATA_DEVICE_OBS;
 	else
@@ -1578,8 +1569,6 @@ extern const struct ata_port_operations ata_bmdma32_port_ops;
 	.sg_tablesize		= LIBATA_MAX_PRD,		\
 	.dma_boundary		= ATA_DMA_BOUNDARY
 
-extern void ata_sff_qc_prep(struct ata_queued_cmd *qc);
-extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_sff_check_status(struct ata_port *ap);
 extern void ata_sff_pause(struct ata_port *ap);
@@ -1597,10 +1586,11 @@ extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
 			unsigned char *buf, unsigned int buflen, int rw);
 extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
 			unsigned char *buf, unsigned int buflen, int rw);
-extern u8 ata_sff_irq_on(struct ata_port *ap);
+extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 			    u8 status, int in_wq);
+extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_host_intr(struct ata_port *ap,
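For illustration only, not part of this commit: with ata_sff_irq_on() now returning void, a driver that wraps it for controller-specific interrupt unmasking would look roughly like the sketch below. The driver prefix is hypothetical and the chip-specific part is left as a comment.

#include <linux/libata.h>

static void foo_sff_irq_on(struct ata_port *ap)
{
	/* unmask the controller's private interrupt-enable bits here */

	/* then let the generic SFF helper handle the taskfile side */
	ata_sff_irq_on(ap);
}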
@@ -1621,21 +1611,8 @@ extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
 extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes);
 extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc);
 extern void ata_sff_error_handler(struct ata_port *ap);
 extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc);
-extern int ata_sff_port_start(struct ata_port *ap);
-extern int ata_sff_port_start32(struct ata_port *ap);
 extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
-extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev,
-					   unsigned long xfer_mask);
-extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
-extern void ata_bmdma_start(struct ata_queued_cmd *qc);
-extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
-extern u8 ata_bmdma_status(struct ata_port *ap);
-extern void ata_bus_reset(struct ata_port *ap);
 
 #ifdef CONFIG_PCI
 extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
-extern int ata_pci_bmdma_init(struct ata_host *host);
 extern int ata_pci_sff_init_host(struct ata_host *host);
 extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
 				    const struct ata_port_info * const * ppi,
@@ -1648,6 +1625,23 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev,
 		struct scsi_host_template *sht, void *host_priv, int hflags);
 #endif /* CONFIG_PCI */
 
+extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
+extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern void ata_bmdma_error_handler(struct ata_port *ap);
+extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
+extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
+extern void ata_bmdma_start(struct ata_queued_cmd *qc);
+extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
+extern u8 ata_bmdma_status(struct ata_port *ap);
+extern int ata_bmdma_port_start(struct ata_port *ap);
+extern int ata_bmdma_port_start32(struct ata_port *ap);
+
+#ifdef CONFIG_PCI
+extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
+extern void ata_pci_bmdma_init(struct ata_host *host);
+#endif /* CONFIG_PCI */
+
 /**
  * ata_sff_busy_wait - Wait for a port status register
  * @ap: Port to wait for.
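For illustration only, not part of this commit: after this SFF/BMDMA split, a typical BMDMA driver inherits the BMDMA base ops and so picks up the ata_bmdma_* qc_prep/qc_issue/EH/port_start helpers declared above. The sketch below assumes the pre-existing ata_bmdma_port_ops base ops (not shown in these hunks) and uses hypothetical driver names.

#include <linux/libata.h>

static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* program controller-specific PIO timing registers here */
}

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.set_piomode	= foo_set_piomode,
};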