libata: Grand renaming.

The biggest change is that ata_host_set is renamed to ata_host.

* ata_host_set			=> ata_host
* ata_probe_ent->host_flags	=> ata_probe_ent->port_flags
* ata_probe_ent->host_set_flags	=> ata_probe_ent->_host_flags
* ata_host_stats		=> ata_port_stats
* ata_port->host		=> ata_port->scsi_host
* ata_port->host_set		=> ata_port->host
* ata_port_info->host_flags	=> ata_port_info->flags
* ata_(.*)host_set(.*)\(\)	=> ata_\1host\2()

The leading underscore in ata_probe_ent->_host_flags is to avoid
reusing ->host_flags for a different purpose.  Currently, the only
user of the field is libata-bmdma.c, and probe_ent itself is
scheduled to be removed.

ata_port->host is reused for a different purpose, but this field is
used only inside the libata core proper and is of a different type,
so any leftover use is caught by the compiler.
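
As an illustration only (hypothetical driver helpers, not taken from
this patch), a typical driver-side access pattern reads as follows
before and after the renaming:

/* Sketch; assumes <linux/libata.h> from this kernel version.
 * demo_old()/demo_new() are hypothetical, shown only to contrast the
 * field names per the mapping above.
 */

/* before: struct ata_host_set, ap->host_set, ap->host is the Scsi_Host */
static void demo_old(struct ata_port *ap)
{
	struct ata_host_set *hs = ap->host_set;	/* controller-wide state */
	struct Scsi_Host *shost = ap->host;	/* SCSI midlayer host */
	void __iomem *mmio = hs->mmio_base;

	spin_lock(&hs->lock);
	/* ... program mmio, poke shost ... */
	spin_unlock(&hs->lock);
}

/* after: struct ata_host, ap->host, ap->scsi_host */
static void demo_new(struct ata_port *ap)
{
	struct ata_host *host = ap->host;	/* controller-wide state, renamed */
	struct Scsi_Host *shost = ap->scsi_host;	/* SCSI midlayer host, renamed */
	void __iomem *mmio = host->mmio_base;

	spin_lock(&host->lock);
	/* ... program mmio, poke shost ... */
	spin_unlock(&host->lock);
}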

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Committed by Jeff Garzik, 2006-08-24 03:19:22 -04:00
parent 54a86bfc3d
commit cca3974e48
21 changed files with 568 additions and 573 deletions


@ -277,7 +277,7 @@ static const struct ata_port_info ahci_port_info[] = {
/* board_ahci */
{
.sht = &ahci_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_SKIP_D2H_BSY,
.pio_mask = 0x1f, /* pio0-4 */
@ -287,7 +287,7 @@ static const struct ata_port_info ahci_port_info[] = {
/* board_ahci_vt8251 */
{
.sht = &ahci_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_SKIP_D2H_BSY |
AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
@ -709,7 +709,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
static int ahci_clo(struct ata_port *ap)
{
void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
u32 tmp;
if (!(hpriv->cap & HOST_CAP_CLO))
@ -741,7 +741,7 @@ static int ahci_prereset(struct ata_port *ap)
static int ahci_softreset(struct ata_port *ap, unsigned int *class)
{
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const u32 cmd_fis_len = 5; /* five dwords */
const char *reason = NULL;
@ -850,7 +850,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
struct ahci_port_priv *pp = ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
int rc;
@ -1039,7 +1039,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
static void ahci_host_intr(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
struct ata_eh_info *ehi = &ap->eh_info;
u32 status, qc_active;
@ -1091,7 +1091,7 @@ static void ahci_irq_clear(struct ata_port *ap)
static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int i, handled = 0;
void __iomem *mmio;
@ -1099,8 +1099,8 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
VPRINTK("ENTER\n");
hpriv = host_set->private_data;
mmio = host_set->mmio_base;
hpriv = host->private_data;
mmio = host->mmio_base;
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
@ -1108,22 +1108,22 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
if (!irq_stat)
return IRQ_NONE;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
if (!(irq_stat & (1 << i)))
continue;
ap = host_set->ports[i];
ap = host->ports[i];
if (ap) {
ahci_host_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
dev_printk(KERN_WARNING, host_set->dev,
dev_printk(KERN_WARNING, host->dev,
"interrupt on disabled port %u\n", i);
}
@ -1135,7 +1135,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
handled = 1;
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
@ -1157,7 +1157,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
static void ahci_freeze(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
/* turn IRQ off */
@ -1166,7 +1166,7 @@ static void ahci_freeze(struct ata_port *ap)
static void ahci_thaw(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
u32 tmp;
@ -1181,7 +1181,7 @@ static void ahci_thaw(struct ata_port *ap)
static void ahci_error_handler(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
@ -1198,7 +1198,7 @@ static void ahci_error_handler(struct ata_port *ap)
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
if (qc->flags & ATA_QCFLAG_FAILED)
@ -1213,9 +1213,9 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const char *emsg = NULL;
int rc;
@ -1233,8 +1233,8 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
static int ahci_port_resume(struct ata_port *ap)
{
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host_set->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
@ -1244,8 +1244,8 @@ static int ahci_port_resume(struct ata_port *ap)
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = dev_get_drvdata(&pdev->dev);
void __iomem *mmio = host->mmio_base;
u32 ctl;
if (mesg.event == PM_EVENT_SUSPEND) {
@ -1264,9 +1264,9 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
static int ahci_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host_set->private_data;
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = host->mmio_base;
int rc;
ata_pci_device_do_resume(pdev);
@ -1276,20 +1276,20 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
if (rc)
return rc;
ahci_init_controller(mmio, pdev, host_set->n_ports, hpriv->cap);
ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
}
ata_host_set_resume(host_set);
ata_host_resume(host);
return 0;
}
static int ahci_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct device *dev = ap->host->dev;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
void *mem;
dma_addr_t mem_dma;
@ -1350,10 +1350,10 @@ static int ahci_port_start(struct ata_port *ap)
static void ahci_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct device *dev = ap->host->dev;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const char *emsg = NULL;
int rc;
@ -1581,7 +1581,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
memset(hpriv, 0, sizeof(*hpriv));
probe_ent->sht = ahci_port_info[board_idx].sht;
probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
probe_ent->port_flags = ahci_port_info[board_idx].flags;
probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
@ -1599,9 +1599,9 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_hpriv;
if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
(hpriv->cap & HOST_CAP_NCQ))
probe_ent->host_flags |= ATA_FLAG_NCQ;
probe_ent->port_flags |= ATA_FLAG_NCQ;
ahci_print_info(probe_ent);
@ -1632,27 +1632,27 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static void ahci_remove_one (struct pci_dev *pdev)
{
struct device *dev = pci_dev_to_dev(pdev);
struct ata_host_set *host_set = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host_set->private_data;
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
unsigned int i;
int have_msi;
for (i = 0; i < host_set->n_ports; i++)
ata_port_detach(host_set->ports[i]);
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
have_msi = hpriv->flags & AHCI_FLAG_MSI;
free_irq(host_set->irq, host_set);
free_irq(host->irq, host);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_scsi_release(ap->host);
scsi_host_put(ap->host);
ata_scsi_release(ap->scsi_host);
scsi_host_put(ap->scsi_host);
}
kfree(hpriv);
pci_iounmap(pdev, host_set->mmio_base);
kfree(host_set);
pci_iounmap(pdev, host->mmio_base);
kfree(host);
if (have_msi)
pci_disable_msi(pdev);


@ -151,7 +151,7 @@ struct piix_host_priv {
static int piix_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent);
static void piix_host_stop(struct ata_host_set *host_set);
static void piix_host_stop(struct ata_host *host);
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
static void piix_pata_error_handler(struct ata_port *ap);
@ -362,7 +362,7 @@ static struct ata_port_info piix_port_info[] = {
/* piix4_pata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SLAVE_POSS,
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f, /* pio0-4 */
#if 0
.mwdma_mask = 0x06, /* mwdma1-2 */
@ -376,7 +376,7 @@ static struct ata_port_info piix_port_info[] = {
/* ich5_pata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
.flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
.pio_mask = 0x1f, /* pio0-4 */
#if 0
.mwdma_mask = 0x06, /* mwdma1-2 */
@ -390,7 +390,7 @@ static struct ata_port_info piix_port_info[] = {
/* ich5_sata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
.flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
PIIX_FLAG_IGNORE_PCS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -401,7 +401,7 @@ static struct ata_port_info piix_port_info[] = {
/* i6300esb_sata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -412,7 +412,7 @@ static struct ata_port_info piix_port_info[] = {
/* ich6_sata */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -423,7 +423,7 @@ static struct ata_port_info piix_port_info[] = {
/* ich6_sata_ahci */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
@ -435,7 +435,7 @@ static struct ata_port_info piix_port_info[] = {
/* ich6m_sata_ahci */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
@ -447,7 +447,7 @@ static struct ata_port_info piix_port_info[] = {
/* ich8_sata_ahci */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
@ -485,7 +485,7 @@ MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
*/
static void piix_pata_cbl_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp, mask;
/* no 80c support in host controller? */
@ -517,7 +517,7 @@ static void piix_pata_cbl_detect(struct ata_port *ap)
*/
static int piix_pata_prereset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
@ -551,8 +551,8 @@ static void piix_pata_error_handler(struct ata_port *ap)
*/
static unsigned int piix_sata_present_mask(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct piix_host_priv *hpriv = ap->host_set->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct piix_host_priv *hpriv = ap->host->private_data;
const unsigned int *map = hpriv->map;
int base = 2 * ap->port_no;
unsigned int present_mask = 0;
@ -631,7 +631,7 @@ static void piix_sata_error_handler(struct ata_port *ap)
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
@ -683,7 +683,7 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int udma = adev->dma_mode; /* FIXME: MWDMA too */
struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u8 maslave = ap->port_no ? 0x42 : 0x40;
u8 speed = udma;
unsigned int drive_dn = (ap->port_no ? 2 : 0) + adev->devno;
@ -835,13 +835,13 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
if (force_pcs == 1) {
dev_printk(KERN_INFO, &pdev->dev,
"force ignoring PCS (0x%x)\n", new_pcs);
pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
} else if (force_pcs == 2) {
dev_printk(KERN_INFO, &pdev->dev,
"force honoring PCS (0x%x)\n", new_pcs);
pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
}
}
@ -881,7 +881,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
default:
printk(" P%d", map[i]);
if (i & 1)
pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
break;
}
}
@ -916,7 +916,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_port_info port_info[2];
struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
struct piix_host_priv *hpriv;
unsigned long host_flags;
unsigned long port_flags;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
@ -935,9 +935,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
port_info[0].private_data = hpriv;
port_info[1].private_data = hpriv;
host_flags = port_info[0].host_flags;
port_flags = port_info[0].flags;
if (host_flags & PIIX_FLAG_AHCI) {
if (port_flags & PIIX_FLAG_AHCI) {
u8 tmp;
pci_read_config_byte(pdev, PIIX_SCC, &tmp);
if (tmp == PIIX_AHCI_DEVICE) {
@ -948,7 +948,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Initialize SATA map */
if (host_flags & ATA_FLAG_SATA) {
if (port_flags & ATA_FLAG_SATA) {
piix_init_sata_map(pdev, port_info,
piix_map_db_table[ent->driver_data]);
piix_init_pcs(pdev, port_info,
@ -961,7 +961,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
* MSI is disabled (and it is disabled, as we don't use
* message-signalled interrupts currently).
*/
if (host_flags & PIIX_FLAG_CHECKINTR)
if (port_flags & PIIX_FLAG_CHECKINTR)
pci_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
@ -976,11 +976,11 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return ata_pci_init_one(pdev, ppinfo, 2);
}
static void piix_host_stop(struct ata_host_set *host_set)
static void piix_host_stop(struct ata_host *host)
{
struct piix_host_priv *hpriv = host_set->private_data;
struct piix_host_priv *hpriv = host->private_data;
ata_host_stop(host_set);
ata_host_stop(host);
kfree(hpriv);
}


@ -193,7 +193,7 @@ void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
* synchronization with interrupt handler / other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
@ -216,7 +216,7 @@ static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile
* FIXME: missing write posting for 400nS delay enforcement
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
@ -237,7 +237,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile
* synchronization with interrupt handler / other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
@ -422,7 +422,7 @@ u8 ata_altstatus(struct ata_port *ap)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
@ -452,7 +452,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
@ -483,7 +483,7 @@ static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
@ -511,7 +511,7 @@ static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
@ -535,7 +535,7 @@ static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
* May be used as the bmdma_start() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
@ -557,7 +557,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc)
* May be used as the bmdma_setup() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
@ -577,7 +577,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
* May be used as the irq_clear() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_irq_clear(struct ata_port *ap)
@ -605,7 +605,7 @@ void ata_bmdma_irq_clear(struct ata_port *ap)
* May be used as the bmdma_status() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
u8 ata_bmdma_status(struct ata_port *ap)
@ -629,7 +629,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
* May be used as the bmdma_stop() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_stop(struct ata_queued_cmd *qc)
@ -838,7 +838,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
bmdma = pci_resource_start(pdev, 4);
if (bmdma) {
if (inb(bmdma + 2) & 0x80)
probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
probe_ent->port[p].bmdma_addr = bmdma;
}
ata_std_ports(&probe_ent->port[p]);
@ -854,7 +854,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
if (bmdma) {
bmdma += 8;
if(inb(bmdma + 2) & 0x80)
probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
probe_ent->port[p].bmdma_addr = bmdma;
}
ata_std_ports(&probe_ent->port[p]);
@ -887,7 +887,7 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
if (bmdma) {
probe_ent->port[0].bmdma_addr = bmdma;
if (inb(bmdma + 2) & 0x80)
probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
}
ata_std_ports(&probe_ent->port[0]);
} else
@ -904,7 +904,7 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
if (bmdma) {
probe_ent->port[1].bmdma_addr = bmdma + 8;
if (inb(bmdma + 10) & 0x80)
probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
}
ata_std_ports(&probe_ent->port[1]);
} else
@ -957,7 +957,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
else
port[1] = port[0];
if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0
&& (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
/* TODO: What if one channel is in native mode ... */
pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);


@ -1335,7 +1335,7 @@ static void ata_dev_config_ncq(struct ata_device *dev,
}
if (ap->flags & ATA_FLAG_NCQ) {
hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
dev->flags |= ATA_DFLAG_NCQ;
}
@ -1349,12 +1349,13 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
int i;
if (ap->host) {
ap->host->max_cmd_len = 0;
if (ap->scsi_host) {
unsigned int len = 0;
for (i = 0; i < ATA_MAX_DEVICES; i++)
ap->host->max_cmd_len = max_t(unsigned int,
ap->host->max_cmd_len,
ap->device[i].cdb_len);
len = max(len, ap->device[i].cdb_len);
ap->scsi_host->max_cmd_len = len;
}
}
@ -1662,7 +1663,7 @@ int ata_bus_probe(struct ata_port *ap)
* Modify @ap data structure such that the system
* thinks that the entire port is enabled.
*
* LOCKING: host_set lock, or some other form of
* LOCKING: host lock, or some other form of
* serialization.
*/
@ -1800,7 +1801,7 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
* never attempt to probe or communicate with devices
* on this port.
*
* LOCKING: host_set lock, or some other form of
* LOCKING: host lock, or some other form of
* serialization.
*/
@ -2258,8 +2259,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
/* Record simplex status. If we selected DMA then the other
* host channels are not permitted to do so.
*/
if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
ap->host_set->simplex_claimed = 1;
if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
ap->host->simplex_claimed = 1;
/* step5: chip specific finalisation */
if (ap->ops->post_set_mode)
@ -2281,7 +2282,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
* other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static inline void ata_tf_to_host(struct ata_port *ap,
@ -2445,7 +2446,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
*
* LOCKING:
* PCI/etc. bus probe sem.
* Obtains host_set lock.
* Obtains host lock.
*
* SIDE EFFECTS:
* Sets ATA_FLAG_DISABLED if bus reset fails.
@ -3080,7 +3081,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
static void ata_dev_xfermask(struct ata_device *dev)
{
struct ata_port *ap = dev->ap;
struct ata_host_set *hs = ap->host_set;
struct ata_host *host = ap->host;
unsigned long xfer_mask;
/* controller modes available */
@ -3114,7 +3115,7 @@ static void ata_dev_xfermask(struct ata_device *dev)
"device is on DMA blacklist, disabling DMA\n");
}
if ((hs->flags & ATA_HOST_SIMPLEX) && hs->simplex_claimed) {
if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
"other device, disabling DMA\n");
@ -3207,7 +3208,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
* Unmap all mapped DMA memory associated with this command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_sg_clean(struct ata_queued_cmd *qc)
@ -3267,7 +3268,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
* associated with the current disk command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
*/
static void ata_fill_sg(struct ata_queued_cmd *qc)
@ -3319,7 +3320,7 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
* supplied PACKET command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS: 0 when ATAPI DMA can be used
* nonzero otherwise
@ -3341,7 +3342,7 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
* Prepare ATA taskfile for submission.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_qc_prep(struct ata_queued_cmd *qc)
{
@ -3363,7 +3364,7 @@ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
* to point to a single memory buffer, @buf of byte length @buflen.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
@ -3394,7 +3395,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
* elements.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
@ -3413,7 +3414,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
* DMA-map the memory buffer associated with queued_cmd @qc.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, negative on error.
@ -3482,7 +3483,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
* DMA-map the scatter-gather table associated with queued_cmd @qc.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, negative on error.
@ -3991,7 +3992,7 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *q
* Finish @qc which is running on standard HSM.
*
* LOCKING:
* If @in_wq is zero, spin_lock_irqsave(host_set lock).
* If @in_wq is zero, spin_lock_irqsave(host lock).
* Otherwise, none on entry and grabs host lock.
*/
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
@ -4003,8 +4004,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
if (in_wq) {
spin_lock_irqsave(ap->lock, flags);
/* EH might have kicked in while host_set lock
* is released.
/* EH might have kicked in while host lock is
* released.
*/
qc = ata_qc_from_tag(ap, qc->tag);
if (qc) {
@ -4369,7 +4370,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
* in case something prevents using it.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_qc_free(struct ata_queued_cmd *qc)
{
@ -4422,7 +4423,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
* command has completed, with either an ok or not-ok status.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_qc_complete(struct ata_queued_cmd *qc)
{
@ -4485,7 +4486,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* and commands are completed accordingly.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of completed commands on success, -errno otherwise.
@ -4556,7 +4557,7 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
* writing the taskfile to hardware, starting the command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_qc_issue(struct ata_queued_cmd *qc)
{
@ -4617,7 +4618,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
* May be used as the qc_issue() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
@ -4746,7 +4747,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
* handled via polling with interrupts disabled (nIEN bit).
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* One if interrupt was handled, zero if not (shared irq).
@ -4833,14 +4834,14 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
/**
* ata_interrupt - Default ATA host interrupt handler
* @irq: irq line (unused)
* @dev_instance: pointer to our ata_host_set information structure
* @dev_instance: pointer to our ata_host information structure
* @regs: unused
*
* Default interrupt handler for PCI IDE devices. Calls
* ata_host_intr() for each port that is not disabled.
*
* LOCKING:
* Obtains host_set lock during operation.
* Obtains host lock during operation.
*
* RETURNS:
* IRQ_NONE or IRQ_HANDLED.
@ -4848,18 +4849,18 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
spin_lock_irqsave(&host_set->lock, flags);
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
ap = host_set->ports[i];
ap = host->ports[i];
if (ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
@ -4871,7 +4872,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
}
}
spin_unlock_irqrestore(&host_set->lock, flags);
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
@ -5036,15 +5037,15 @@ int ata_flush_cache(struct ata_device *dev)
return 0;
}
static int ata_host_set_request_pm(struct ata_host_set *host_set,
pm_message_t mesg, unsigned int action,
unsigned int ehi_flags, int wait)
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
int wait)
{
unsigned long flags;
int i, rc;
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
@ -5084,11 +5085,11 @@ static int ata_host_set_request_pm(struct ata_host_set *host_set,
}
/**
* ata_host_set_suspend - suspend host_set
* @host_set: host_set to suspend
* ata_host_suspend - suspend host
* @host: host to suspend
* @mesg: PM message
*
* Suspend @host_set. Actual operation is performed by EH. This
* Suspend @host. Actual operation is performed by EH. This
* function requests EH to perform PM operations and waits for EH
* to finish.
*
@ -5098,11 +5099,11 @@ static int ata_host_set_request_pm(struct ata_host_set *host_set,
* RETURNS:
* 0 on success, -errno on failure.
*/
int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
int i, j, rc;
rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
if (rc)
goto fail;
@ -5110,8 +5111,8 @@ int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
* This happens if hotplug occurs between completion of device
* suspension and here.
*/
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
for (j = 0; j < ATA_MAX_DEVICES; j++) {
struct ata_device *dev = &ap->device[j];
@ -5126,30 +5127,30 @@ int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
}
}
host_set->dev->power.power_state = mesg;
host->dev->power.power_state = mesg;
return 0;
fail:
ata_host_set_resume(host_set);
ata_host_resume(host);
return rc;
}
/**
* ata_host_set_resume - resume host_set
* @host_set: host_set to resume
* ata_host_resume - resume host
* @host: host to resume
*
* Resume @host_set. Actual operation is performed by EH. This
* Resume @host. Actual operation is performed by EH. This
* function requests EH to perform PM operations and returns.
* Note that all resume operations are performed parallely.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
void ata_host_set_resume(struct ata_host_set *host_set)
void ata_host_resume(struct ata_host *host)
{
ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host_set->dev->power.power_state = PMSG_ON;
ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host->dev->power.power_state = PMSG_ON;
}
/**
@ -5206,10 +5207,10 @@ void ata_port_stop (struct ata_port *ap)
ata_pad_free(ap, dev);
}
void ata_host_stop (struct ata_host_set *host_set)
void ata_host_stop (struct ata_host *host)
{
if (host_set->mmio_base)
iounmap(host_set->mmio_base);
if (host->mmio_base)
iounmap(host->mmio_base);
}
/**
@ -5231,7 +5232,7 @@ void ata_dev_init(struct ata_device *dev)
/* High bits of dev->flags are used to record warm plug
* requests which occur asynchronously. Synchronize using
* host_set lock.
* host lock.
*/
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_INIT_MASK;
@ -5247,7 +5248,7 @@ void ata_dev_init(struct ata_device *dev)
/**
* ata_port_init - Initialize an ata_port structure
* @ap: Structure to initialize
* @host_set: Collection of hosts to which @ap belongs
* @host: Collection of hosts to which @ap belongs
* @ent: Probe information provided by low-level driver
* @port_no: Port number associated with this ata_port
*
@ -5256,22 +5257,22 @@ void ata_dev_init(struct ata_device *dev)
* LOCKING:
* Inherited from caller.
*/
void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set,
void ata_port_init(struct ata_port *ap, struct ata_host *host,
const struct ata_probe_ent *ent, unsigned int port_no)
{
unsigned int i;
ap->lock = &host_set->lock;
ap->lock = &host->lock;
ap->flags = ATA_FLAG_DISABLED;
ap->id = ata_unique_id++;
ap->ctl = ATA_DEVCTL_OBS;
ap->host_set = host_set;
ap->host = host;
ap->dev = ent->dev;
ap->port_no = port_no;
ap->pio_mask = ent->pio_mask;
ap->mwdma_mask = ent->mwdma_mask;
ap->udma_mask = ent->udma_mask;
ap->flags |= ent->host_flags;
ap->flags |= ent->port_flags;
ap->ops = ent->port_ops;
ap->hw_sata_spd_limit = UINT_MAX;
ap->active_tag = ATA_TAG_POISON;
@ -5324,7 +5325,7 @@ void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set,
*/
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
ap->host = shost;
ap->scsi_host = shost;
shost->unique_id = ap->id;
shost->max_id = 16;
@ -5336,7 +5337,7 @@ static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
/**
* ata_port_add - Attach low-level ATA driver to system
* @ent: Information provided by low-level driver
* @host_set: Collections of ports to which we add
* @host: Collections of ports to which we add
* @port_no: Port number associated with this host
*
* Attach low-level ATA driver to system.
@ -5348,7 +5349,7 @@ static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
* New ata_port on success, for NULL on error.
*/
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
struct ata_host_set *host_set,
struct ata_host *host,
unsigned int port_no)
{
struct Scsi_Host *shost;
@ -5357,7 +5358,7 @@ static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
DPRINTK("ENTER\n");
if (!ent->port_ops->error_handler &&
!(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
!(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
printk(KERN_ERR "ata%u: no reset mechanism available\n",
port_no);
return NULL;
@ -5371,32 +5372,31 @@ static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
ap = ata_shost_to_port(shost);
ata_port_init(ap, host_set, ent, port_no);
ata_port_init(ap, host, ent, port_no);
ata_port_init_shost(ap, shost);
return ap;
}
/**
* ata_sas_host_init - Initialize a host_set struct
* @host_set: host_set to initialize
* @dev: device host_set is attached to
* @flags: host_set flags
* @ops: port_ops
* ata_sas_host_init - Initialize a host struct
* @host: host to initialize
* @dev: device host is attached to
* @flags: host flags
* @ops: port_ops
*
* LOCKING:
* PCI/etc. bus probe sem.
*
*/
void ata_host_set_init(struct ata_host_set *host_set,
struct device *dev, unsigned long flags,
const struct ata_port_operations *ops)
void ata_host_init(struct ata_host *host, struct device *dev,
unsigned long flags, const struct ata_port_operations *ops)
{
spin_lock_init(&host_set->lock);
host_set->dev = dev;
host_set->flags = flags;
host_set->ops = ops;
spin_lock_init(&host->lock);
host->dev = dev;
host->flags = flags;
host->ops = ops;
}
/**
@ -5421,34 +5421,34 @@ int ata_device_add(const struct ata_probe_ent *ent)
{
unsigned int i;
struct device *dev = ent->dev;
struct ata_host_set *host_set;
struct ata_host *host;
int rc;
DPRINTK("ENTER\n");
/* alloc a container for our list of ATA ports (buses) */
host_set = kzalloc(sizeof(struct ata_host_set) +
(ent->n_ports * sizeof(void *)), GFP_KERNEL);
if (!host_set)
host = kzalloc(sizeof(struct ata_host) +
(ent->n_ports * sizeof(void *)), GFP_KERNEL);
if (!host)
return 0;
ata_host_set_init(host_set, dev, ent->host_set_flags, ent->port_ops);
host_set->n_ports = ent->n_ports;
host_set->irq = ent->irq;
host_set->irq2 = ent->irq2;
host_set->mmio_base = ent->mmio_base;
host_set->private_data = ent->private_data;
ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
host->n_ports = ent->n_ports;
host->irq = ent->irq;
host->irq2 = ent->irq2;
host->mmio_base = ent->mmio_base;
host->private_data = ent->private_data;
/* register each port bound to this device */
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
unsigned long xfer_mode_mask;
int irq_line = ent->irq;
ap = ata_port_add(ent, host_set, i);
ap = ata_port_add(ent, host, i);
if (!ap)
goto err_out;
host_set->ports[i] = ap;
host->ports[i] = ap;
/* dummy? */
if (ent->dummy_port_mask & (1 << i)) {
@ -5460,8 +5460,8 @@ int ata_device_add(const struct ata_probe_ent *ent)
/* start port */
rc = ap->ops->port_start(ap);
if (rc) {
host_set->ports[i] = NULL;
scsi_host_put(ap->host);
host->ports[i] = NULL;
scsi_host_put(ap->scsi_host);
goto err_out;
}
@ -5484,13 +5484,13 @@ int ata_device_add(const struct ata_probe_ent *ent)
irq_line);
ata_chk_status(ap);
host_set->ops->irq_clear(ap);
host->ops->irq_clear(ap);
ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
}
/* obtain irq, that may be shared between channels */
rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
DRV_NAME, host_set);
DRV_NAME, host);
if (rc) {
dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
ent->irq, rc);
@ -5504,7 +5504,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
BUG_ON(ent->irq == ent->irq2);
rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
DRV_NAME, host_set);
DRV_NAME, host);
if (rc) {
dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
ent->irq2, rc);
@ -5514,8 +5514,8 @@ int ata_device_add(const struct ata_probe_ent *ent)
/* perform each probe synchronously */
DPRINTK("probe begin\n");
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
u32 scontrol;
int rc;
@ -5526,7 +5526,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
}
ap->sata_spd_limit = ap->hw_sata_spd_limit;
rc = scsi_add_host(ap->host, dev);
rc = scsi_add_host(ap->scsi_host, dev);
if (rc) {
ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
/* FIXME: do something useful here */
@ -5574,29 +5574,29 @@ int ata_device_add(const struct ata_probe_ent *ent)
/* probes are done, now scan each port's disk(s) */
DPRINTK("host probe begin\n");
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_scsi_scan_host(ap);
}
dev_set_drvdata(dev, host_set);
dev_set_drvdata(dev, host);
VPRINTK("EXIT, returning %u\n", ent->n_ports);
return ent->n_ports; /* success */
err_out_free_irq:
free_irq(ent->irq, host_set);
free_irq(ent->irq, host);
err_out:
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ap) {
ap->ops->port_stop(ap);
scsi_host_put(ap->host);
scsi_host_put(ap->scsi_host);
}
}
kfree(host_set);
kfree(host);
VPRINTK("EXIT, returning 0\n");
return 0;
}
@ -5656,12 +5656,12 @@ void ata_port_detach(struct ata_port *ap)
skip_eh:
/* remove the associated SCSI host */
scsi_remove_host(ap->host);
scsi_remove_host(ap->scsi_host);
}
/**
* ata_host_set_remove - PCI layer callback for device removal
* @host_set: ATA host set that was removed
* ata_host_remove - PCI layer callback for device removal
* @host: ATA host set that was removed
*
* Unregister all objects associated with this host set. Free those
* objects.
@ -5670,21 +5670,21 @@ void ata_port_detach(struct ata_port *ap)
* Inherited from calling layer (may sleep).
*/
void ata_host_set_remove(struct ata_host_set *host_set)
void ata_host_remove(struct ata_host *host)
{
unsigned int i;
for (i = 0; i < host_set->n_ports; i++)
ata_port_detach(host_set->ports[i]);
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
free_irq(host_set->irq, host_set);
if (host_set->irq2)
free_irq(host_set->irq2, host_set);
free_irq(host->irq, host);
if (host->irq2)
free_irq(host->irq2, host);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_scsi_release(ap->host);
ata_scsi_release(ap->scsi_host);
if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
struct ata_ioports *ioaddr = &ap->ioaddr;
@ -5696,13 +5696,13 @@ void ata_host_set_remove(struct ata_host_set *host_set)
release_region(ATA_SECONDARY_CMD, 8);
}
scsi_host_put(ap->host);
scsi_host_put(ap->scsi_host);
}
if (host_set->ops->host_stop)
host_set->ops->host_stop(host_set);
if (host->ops->host_stop)
host->ops->host_stop(host);
kfree(host_set);
kfree(host);
}
/**
@ -5719,9 +5719,9 @@ void ata_host_set_remove(struct ata_host_set *host_set)
* One.
*/
int ata_scsi_release(struct Scsi_Host *host)
int ata_scsi_release(struct Scsi_Host *shost)
{
struct ata_port *ap = ata_shost_to_port(host);
struct ata_port *ap = ata_shost_to_port(shost);
DPRINTK("ENTER\n");
@ -5748,7 +5748,7 @@ ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
probe_ent->dev = dev;
probe_ent->sht = port->sht;
probe_ent->host_flags = port->host_flags;
probe_ent->port_flags = port->flags;
probe_ent->pio_mask = port->pio_mask;
probe_ent->mwdma_mask = port->mwdma_mask;
probe_ent->udma_mask = port->udma_mask;
@ -5786,11 +5786,11 @@ void ata_std_ports(struct ata_ioports *ioaddr)
#ifdef CONFIG_PCI
void ata_pci_host_stop (struct ata_host_set *host_set)
void ata_pci_host_stop (struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct pci_dev *pdev = to_pci_dev(host->dev);
pci_iounmap(pdev, host_set->mmio_base);
pci_iounmap(pdev, host->mmio_base);
}
/**
@ -5810,9 +5810,9 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
void ata_pci_remove_one (struct pci_dev *pdev)
{
struct device *dev = pci_dev_to_dev(pdev);
struct ata_host_set *host_set = dev_get_drvdata(dev);
struct ata_host *host = dev_get_drvdata(dev);
ata_host_set_remove(host_set);
ata_host_remove(host);
pci_release_regions(pdev);
pci_disable_device(pdev);
@ -5873,10 +5873,10 @@ void ata_pci_device_do_resume(struct pci_dev *pdev)
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc = 0;
rc = ata_host_set_suspend(host_set, mesg);
rc = ata_host_suspend(host, mesg);
if (rc)
return rc;
@ -5887,10 +5887,10 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
int ata_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
struct ata_host *host = dev_get_drvdata(&pdev->dev);
ata_pci_device_do_resume(pdev);
ata_host_set_resume(host_set);
ata_host_resume(host);
return 0;
}
#endif /* CONFIG_PCI */
@ -6035,10 +6035,10 @@ EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_set_init);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
@ -6105,8 +6105,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_set_suspend);
EXPORT_SYMBOL_GPL(ata_host_set_resume);
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);


@ -200,7 +200,7 @@ void ata_scsi_error(struct Scsi_Host *host)
/* synchronize with port task */
ata_port_flush_task(ap);
/* synchronize with host_set lock and sort out timeouts */
/* synchronize with host lock and sort out timeouts */
/* For new EH, all qcs are finished in one of three ways -
* normal completion, error completion, and SCSI timeout.
@ -377,7 +377,7 @@ void ata_port_wait_eh(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
/* make sure SCSI EH is complete */
if (scsi_host_in_recovery(ap->host)) {
if (scsi_host_in_recovery(ap->scsi_host)) {
msleep(10);
goto retry;
}
@ -486,7 +486,7 @@ void ata_eng_timeout(struct ata_port *ap)
* other commands are drained.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
@ -513,14 +513,14 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* all commands are drained.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_port_schedule_eh(struct ata_port *ap)
{
WARN_ON(!ap->ops->error_handler);
ap->pflags |= ATA_PFLAG_EH_PENDING;
scsi_schedule_eh(ap->host);
scsi_schedule_eh(ap->scsi_host);
DPRINTK("port EH scheduled\n");
}
@ -532,7 +532,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
* Abort all active qc's of @ap and schedule EH.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of aborted qc's.
@ -575,7 +575,7 @@ int ata_port_abort(struct ata_port *ap)
* is frozen.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void __ata_port_freeze(struct ata_port *ap)
{
@ -596,7 +596,7 @@ static void __ata_port_freeze(struct ata_port *ap)
* Abort and freeze @ap.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of aborted commands.


@ -321,7 +321,7 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
* current command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Command allocated, or %NULL if none available.
@ -537,7 +537,7 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
* format sense blocks.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
u8 *ascq, int verbose)
@ -649,7 +649,7 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
* block. Clear sense key, ASC & ASCQ if there is no error.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
{
@ -918,7 +918,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
* [See SAT revision 5 at www.t10.org]
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -986,7 +986,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
* FLUSH CACHE EXT.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -1109,7 +1109,7 @@ static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
* Converts SCSI VERIFY command to an ATA READ VERIFY command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -1233,7 +1233,7 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *sc
* %WRITE_16 are currently supported.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -1467,7 +1467,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
* issued to @dev.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 1 if deferring is needed, 0 otherwise.
@ -1510,7 +1510,7 @@ static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
* termination.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
@ -1589,7 +1589,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
* Maps buffer contained within SCSI command @cmd.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Length of response buffer.
@ -1623,7 +1623,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
* Unmaps response buffer contained within @cmd.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
@ -1649,7 +1649,7 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
* and sense buffer are assumed to be set).
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
@ -1680,7 +1680,7 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
* with non-VPD INQUIRY command output.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@ -1736,7 +1736,7 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
* Returns list of inquiry VPD pages available.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
@ -1764,7 +1764,7 @@ unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
* Returns ATA device serial number.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
@ -1797,7 +1797,7 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
* name ("ATA "), model and serial numbers.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
@ -1849,7 +1849,7 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
* that the caller should successfully complete this SCSI command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
@ -1990,7 +1990,7 @@ static int ata_dev_supports_fua(u16 *id)
* descriptor for other device types.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
@ -2129,7 +2129,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
* Simulate READ CAPACITY commands.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
@ -2204,7 +2204,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
* Simulate REPORT LUNS command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
@ -2256,7 +2256,7 @@ void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
* and the specified additional sense codes.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
@ -2421,7 +2421,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
* @scsicmd: SCSI CDB associated with this PACKET command
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on failure.
@ -2500,7 +2500,7 @@ static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
* Determine if commands should be sent to the specified device.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 0 if commands are not allowed / 1 if commands are allowed
@ -2534,7 +2534,7 @@ static int ata_scsi_dev_enabled(struct ata_device *dev)
* SCSI command to be sent.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Associated ATA device, or %NULL if not found.
@ -2808,7 +2808,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
* ATA and ATAPI devices appearing as SCSI devices.
*
* LOCKING:
* Releases scsi-layer-held lock, and obtains host_set lock.
* Releases scsi-layer-held lock, and obtains host lock.
*
* RETURNS:
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
@ -2852,7 +2852,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
* that can be handled internally.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
@ -2944,7 +2944,7 @@ void ata_scsi_scan_host(struct ata_port *ap)
if (!ata_dev_enabled(dev) || dev->sdev)
continue;
sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL);
if (!IS_ERR(sdev)) {
dev->sdev = sdev;
scsi_device_put(sdev);
@ -2958,11 +2958,11 @@ void ata_scsi_scan_host(struct ata_port *ap)
*
* This function is called from ata_eh_hotplug() and responsible
* for taking the SCSI device attached to @dev offline. This
* function is called with host_set lock which protects dev->sdev
* function is called with host lock which protects dev->sdev
* against clearing.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 1 if attached SCSI device exists, 0 otherwise.
@ -2998,16 +2998,16 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
* be removed if there is __scsi_device_get() interface which
* increments reference counts regardless of device state.
*/
mutex_lock(&ap->host->scan_mutex);
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
/* clearing dev->sdev is protected by host_set lock */
/* clearing dev->sdev is protected by host lock */
sdev = dev->sdev;
dev->sdev = NULL;
if (sdev) {
/* If user initiated unplug races with us, sdev can go
* away underneath us after the host_set lock and
* away underneath us after the host lock and
* scan_mutex are released. Hold onto it.
*/
if (scsi_device_get(sdev) == 0) {
@ -3024,7 +3024,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
}
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->host->scan_mutex);
mutex_unlock(&ap->scsi_host->scan_mutex);
if (sdev) {
ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
@ -3176,7 +3176,7 @@ void ata_scsi_dev_rescan(void *data)
* ata_sas_port_alloc - Allocate port for a SAS attached SATA device
* @pdev: PCI device that the scsi device is attached to
* @port_info: Information from low-level host driver
* @host: SCSI host that the scsi device is attached to
* @shost: SCSI host that the scsi device is attached to
*
* LOCKING:
* PCI/etc. bus probe sem.
@ -3185,9 +3185,9 @@ void ata_scsi_dev_rescan(void *data)
* ata_port pointer on success / NULL on failure.
*/
struct ata_port *ata_sas_port_alloc(struct ata_host_set *host_set,
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
struct ata_port_info *port_info,
struct Scsi_Host *host)
struct Scsi_Host *shost)
{
struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL);
struct ata_probe_ent *ent;
@ -3195,14 +3195,14 @@ struct ata_port *ata_sas_port_alloc(struct ata_host_set *host_set,
if (!ap)
return NULL;
ent = ata_probe_ent_alloc(host_set->dev, port_info);
ent = ata_probe_ent_alloc(host->dev, port_info);
if (!ent) {
kfree(ap);
return NULL;
}
ata_port_init(ap, host_set, ent, 0);
ap->lock = host->host_lock;
ata_port_init(ap, host, ent, 0);
ap->lock = shost->host_lock;
kfree(ent);
return ap;
}
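
For context, a minimal sketch of how a SAS LLD might call the renamed allocator; the caller below is hypothetical and only the ata_sas_port_alloc() signature shown above is taken from the patch:

        /* hypothetical caller -- only the ata_sas_port_alloc() signature is from the patch */
        static int foo_sas_add_ata_port(struct ata_host *host,
                                        struct ata_port_info *pi,
                                        struct Scsi_Host *shost)
        {
                struct ata_port *ap;

                ap = ata_sas_port_alloc(host, pi, shost);
                if (!ap)
                        return -ENOMEM;         /* allocator returns NULL on failure */

                /* hypothetical: stash ap in driver-private data, begin probing, ... */
                return 0;
        }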

View File

@ -69,7 +69,7 @@ extern int ata_flush_cache(struct ata_device *dev);
extern void ata_dev_init(struct ata_device *dev);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set,
extern void ata_port_init(struct ata_port *ap, struct ata_host *host,
const struct ata_probe_ent *ent, unsigned int port_no);
extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev,
const struct ata_port_info *port);

View File

@ -127,7 +127,7 @@ static int adma_ata_init_one (struct pci_dev *pdev,
static irqreturn_t adma_intr (int irq, void *dev_instance,
struct pt_regs *regs);
static int adma_port_start(struct ata_port *ap);
static void adma_host_stop(struct ata_host_set *host_set);
static void adma_host_stop(struct ata_host *host);
static void adma_port_stop(struct ata_port *ap);
static void adma_phy_reset(struct ata_port *ap);
static void adma_qc_prep(struct ata_queued_cmd *qc);
@ -182,7 +182,7 @@ static struct ata_port_info adma_port_info[] = {
/* board_1841_idx */
{
.sht = &adma_ata_sht,
.host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
ATA_FLAG_PIO_POLLING,
.pio_mask = 0x10, /* pio4 */
@ -237,7 +237,7 @@ static void adma_reset_engine(void __iomem *chan)
static void adma_reinit_engine(struct ata_port *ap)
{
struct adma_port_priv *pp = ap->private_data;
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
/* mask/clear ATA interrupts */
@ -265,7 +265,7 @@ static void adma_reinit_engine(struct ata_port *ap)
static inline void adma_enter_reg_mode(struct ata_port *ap)
{
void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
writew(aPIOMD4, chan + ADMA_CONTROL);
readb(chan + ADMA_STATUS); /* flush */
@ -412,7 +412,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
VPRINTK("ENTER, ap %p\n", ap);
@ -442,13 +442,13 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
return ata_qc_issue_prot(qc);
}
static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
static inline unsigned int adma_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0, port_no;
u8 __iomem *mmio_base = host_set->mmio_base;
u8 __iomem *mmio_base = host->mmio_base;
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
struct ata_port *ap = host_set->ports[port_no];
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap = host->ports[port_no];
struct adma_port_priv *pp;
struct ata_queued_cmd *qc;
void __iomem *chan = ADMA_REGS(mmio_base, port_no);
@ -476,13 +476,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
return handled;
}
static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
static inline unsigned int adma_intr_mmio(struct ata_host *host)
{
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
ap = host->ports[port_no];
if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
struct ata_queued_cmd *qc;
struct adma_port_priv *pp = ap->private_data;
@ -511,14 +511,14 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int handled = 0;
VPRINTK("ENTER\n");
spin_lock(&host_set->lock);
handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
handled = adma_intr_pkt(host) | adma_intr_mmio(host);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
@ -544,7 +544,7 @@ static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
static int adma_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct adma_port_priv *pp;
int rc;
@ -582,10 +582,10 @@ static int adma_port_start(struct ata_port *ap)
static void adma_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct adma_port_priv *pp = ap->private_data;
adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no));
if (pp != NULL) {
ap->private_data = NULL;
if (pp->pkt != NULL)
@ -596,14 +596,14 @@ static void adma_port_stop(struct ata_port *ap)
ata_port_stop(ap);
}
static void adma_host_stop(struct ata_host_set *host_set)
static void adma_host_stop(struct ata_host *host)
{
unsigned int port_no;
for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
adma_reset_engine(ADMA_REGS(host->mmio_base, port_no));
ata_pci_host_stop(host_set);
ata_pci_host_stop(host);
}
static void adma_host_init(unsigned int chip_id,
@ -684,7 +684,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = adma_port_info[board_idx].sht;
probe_ent->host_flags = adma_port_info[board_idx].host_flags;
probe_ent->port_flags = adma_port_info[board_idx].flags;
probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;

View File

@ -342,7 +342,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static void mv_host_stop(struct ata_host *host);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
@ -480,35 +480,35 @@ static const struct ata_port_operations mv_iie_ops = {
static const struct ata_port_info mv_port_info[] = {
{ /* chip_504x */
.sht = &mv_sht,
.host_flags = MV_COMMON_FLAGS,
.flags = MV_COMMON_FLAGS,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv5_ops,
},
{ /* chip_508x */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv5_ops,
},
{ /* chip_5080 */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv5_ops,
},
{ /* chip_604x */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv6_ops,
},
{ /* chip_608x */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
@ -516,14 +516,14 @@ static const struct ata_port_info mv_port_info[] = {
},
{ /* chip_6042 */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv_iie_ops,
},
{ /* chip_7042 */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
@ -618,12 +618,12 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
return mv_port_base(ap->host_set->mmio_base, ap->port_no);
return mv_port_base(ap->host->mmio_base, ap->port_no);
}
static inline int mv_get_hc_count(unsigned long host_flags)
static inline int mv_get_hc_count(unsigned long port_flags)
{
return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_irq_clear(struct ata_port *ap)
@ -809,7 +809,7 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
/**
* mv_host_stop - Host specific cleanup/stop routine.
* @host_set: host data structure
* @host: host data structure
*
* Disable ints, cleanup host memory, call general purpose
* host_stop.
@ -817,10 +817,10 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
* LOCKING:
* Inherited from caller.
*/
static void mv_host_stop(struct ata_host_set *host_set)
static void mv_host_stop(struct ata_host *host)
{
struct mv_host_priv *hpriv = host_set->private_data;
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct mv_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
pci_disable_msi(pdev);
@ -828,7 +828,7 @@ static void mv_host_stop(struct ata_host_set *host_set)
pci_intx(pdev, 0);
}
kfree(hpriv);
ata_host_stop(host_set);
ata_host_stop(host);
}
static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
@ -875,8 +875,8 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
*/
static int mv_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct mv_host_priv *hpriv = ap->host_set->private_data;
struct device *dev = ap->host->dev;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp;
void __iomem *port_mmio = mv_ap_base(ap);
void *mem;
@ -965,17 +965,17 @@ static int mv_port_start(struct ata_port *ap)
* Stop DMA, cleanup port memory.
*
* LOCKING:
* This routine uses the host_set lock to protect the DMA stop.
* This routine uses the host lock to protect the DMA stop.
*/
static void mv_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct mv_port_priv *pp = ap->private_data;
unsigned long flags;
spin_lock_irqsave(&ap->host_set->lock, flags);
spin_lock_irqsave(&ap->host->lock, flags);
mv_stop_dma(ap);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
spin_unlock_irqrestore(&ap->host->lock, flags);
ap->private_data = NULL;
ata_pad_free(ap, dev);
@ -1330,7 +1330,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
/**
* mv_host_intr - Handle all interrupts on the given host controller
* @host_set: host specific structure
* @host: host specific structure
* @relevant: port error bits relevant to this host controller
* @hc: which host controller we're to look at
*
@ -1344,10 +1344,9 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
* LOCKING:
* Inherited from caller.
*/
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
unsigned int hc)
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
void __iomem *mmio = host_set->mmio_base;
void __iomem *mmio = host->mmio_base;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
struct ata_queued_cmd *qc;
u32 hc_irq_cause;
@ -1371,7 +1370,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
u8 ata_status = 0;
struct ata_port *ap = host_set->ports[port];
struct ata_port *ap = host->ports[port];
struct mv_port_priv *pp = ap->private_data;
hard_port = mv_hardport_from_port(port); /* range 0..3 */
@ -1444,15 +1443,15 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
* reported here.
*
* LOCKING:
* This routine holds the host_set lock while processing pending
* This routine holds the host lock while processing pending
* interrupts.
*/
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int hc, handled = 0, n_hcs;
void __iomem *mmio = host_set->mmio_base;
void __iomem *mmio = host->mmio_base;
struct mv_host_priv *hpriv;
u32 irq_stat;
@ -1465,18 +1464,18 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
return IRQ_NONE;
}
n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
spin_lock(&host_set->lock);
n_hcs = mv_get_hc_count(host->ports[0]->flags);
spin_lock(&host->lock);
for (hc = 0; hc < n_hcs; hc++) {
u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
if (relevant) {
mv_host_intr(host_set, relevant, hc);
mv_host_intr(host, relevant, hc);
handled++;
}
}
hpriv = host_set->private_data;
hpriv = host->private_data;
if (IS_60XX(hpriv)) {
/* deal with the interrupt coalescing bits */
if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
@ -1491,12 +1490,12 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
readl(mmio + PCI_IRQ_CAUSE_OFS));
DPRINTK("All regs @ PCI error\n");
mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
handled++;
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
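
Most of the converted interrupt entry points in this patch reduce to the same shape after the rename: dev_instance is now a struct ata_host, the lock lives in host->lock, and ports are reached through host->ports[]. A condensed sketch (the driver name foo and the per-port helper foo_port_intr() are hypothetical; the loop and locking mirror the handlers above):

        static irqreturn_t foo_interrupt(int irq, void *dev_instance,
                                         struct pt_regs *regs)
        {
                struct ata_host *host = dev_instance;   /* was struct ata_host_set */
                unsigned int i, handled = 0;

                spin_lock(&host->lock);                 /* was host_set->lock */
                for (i = 0; i < host->n_ports; i++) {
                        struct ata_port *ap = host->ports[i];

                        if (ap && !(ap->flags & ATA_FLAG_DISABLED))
                                handled += foo_port_intr(ap);   /* hypothetical helper */
                }
                spin_unlock(&host->lock);

                return IRQ_RETVAL(handled);
        }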
@ -1528,7 +1527,7 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU)
@ -1539,7 +1538,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU)
@ -1904,8 +1903,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv_stop_and_reset(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host_set->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = ap->host->mmio_base;
mv_stop_dma(ap);
@ -1936,7 +1935,7 @@ static inline void __msleep(unsigned int msec, int can_sleep)
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host_set->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = mv_ap_base(ap);
struct ata_taskfile tf;
struct ata_device *dev = &ap->device[0];
@ -2035,7 +2034,7 @@ static void mv_phy_reset(struct ata_port *ap)
* chip/bus, fail the command, and move on.
*
* LOCKING:
* This routine holds the host_set lock while failing the command.
* This routine holds the host lock while failing the command.
*/
static void mv_eng_timeout(struct ata_port *ap)
{
@ -2044,18 +2043,17 @@ static void mv_eng_timeout(struct ata_port *ap)
ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
DPRINTK("All regs @ start of eng_timeout\n");
mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
to_pci_dev(ap->host_set->dev));
mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
to_pci_dev(ap->host->dev));
qc = ata_qc_from_tag(ap, ap->active_tag);
printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd);
ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
spin_lock_irqsave(&ap->host_set->lock, flags);
spin_lock_irqsave(&ap->host->lock, flags);
mv_err_intr(ap, 0);
mv_stop_and_reset(ap);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
spin_unlock_irqrestore(&ap->host->lock, flags);
WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
if (qc->flags & ATA_QCFLAG_ACTIVE) {
@ -2236,7 +2234,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
if (rc)
goto done;
n_hc = mv_get_hc_count(probe_ent->host_flags);
n_hc = mv_get_hc_count(probe_ent->port_flags);
probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
for (port = 0; port < probe_ent->n_ports; port++)
@ -2389,7 +2387,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memset(hpriv, 0, sizeof(*hpriv));
probe_ent->sht = mv_port_info[board_idx].sht;
probe_ent->host_flags = mv_port_info[board_idx].host_flags;
probe_ent->port_flags = mv_port_info[board_idx].flags;
probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
probe_ent->port_ops = mv_port_info[board_idx].port_ops;

View File

@ -81,7 +81,7 @@ enum {
};
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_ck804_host_stop(struct ata_host_set *host_set);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
struct pt_regs *regs);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
@ -257,7 +257,7 @@ static struct ata_port_info nv_port_info[] = {
/* generic */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@ -266,7 +266,7 @@ static struct ata_port_info nv_port_info[] = {
/* nforce2/3 */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@ -275,7 +275,7 @@ static struct ata_port_info nv_port_info[] = {
/* ck804 */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@ -292,17 +292,17 @@ MODULE_VERSION(DRV_VERSION);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
spin_lock_irqsave(&host_set->lock, flags);
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
ap = host_set->ports[i];
ap = host->ports[i];
if (ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
@ -318,7 +318,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
}
spin_unlock_irqrestore(&host_set->lock, flags);
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
@ -354,12 +354,12 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
return 1;
}
static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
int i, handled = 0;
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ap && !(ap->flags & ATA_FLAG_DISABLED))
handled += nv_host_intr(ap, irq_stat);
@ -373,14 +373,14 @@ static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host_set->lock);
irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
ret = nv_do_interrupt(host_set, irq_stat);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);
return ret;
}
@ -388,14 +388,14 @@ static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host_set->lock);
irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
ret = nv_do_interrupt(host_set, irq_stat);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);
return ret;
}
@ -418,7 +418,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
static void nv_nf2_freeze(struct ata_port *ap)
{
unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -429,7 +429,7 @@ static void nv_nf2_freeze(struct ata_port *ap)
static void nv_nf2_thaw(struct ata_port *ap)
{
unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -442,7 +442,7 @@ static void nv_nf2_thaw(struct ata_port *ap)
static void nv_ck804_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -453,7 +453,7 @@ static void nv_ck804_freeze(struct ata_port *ap)
static void nv_ck804_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -568,9 +568,9 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
static void nv_ck804_host_stop(struct ata_host_set *host_set)
static void nv_ck804_host_stop(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct pci_dev *pdev = to_pci_dev(host->dev);
u8 regval;
/* disable SATA space for CK804 */
@ -578,7 +578,7 @@ static void nv_ck804_host_stop(struct ata_host_set *host_set)
regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
ata_pci_host_stop(host_set);
ata_pci_host_stop(host);
}
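
The ->host_stop() conversions likewise share one skeleton: the callback now takes a struct ata_host, the underlying PCI device is still reached through to_pci_dev(host->dev), and the generic teardown is ata_pci_host_stop(host). A condensed sketch (driver name foo is hypothetical; the comment stands in for whatever chip-specific cleanup a driver performs, such as the config-space fixup in nv_ck804_host_stop() above):

        static void foo_host_stop(struct ata_host *host)        /* was struct ata_host_set */
        {
                struct pci_dev *pdev = to_pci_dev(host->dev);

                /* chip-specific cleanup would go here, e.g. undoing a
                 * PCI config-space enable bit before generic teardown */

                ata_pci_host_stop(host);
        }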
static int __init nv_init(void)

View File

@ -104,7 +104,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
static void pdc_host_stop(struct ata_host_set *host_set);
static void pdc_host_stop(struct ata_host *host);
static struct scsi_host_template pdc_ata_sht = {
@ -175,7 +175,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_2037x */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -185,7 +185,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20319 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -195,7 +195,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20619 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -205,7 +205,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20771 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -215,7 +215,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_2057x */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -225,7 +225,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_40518 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -292,7 +292,7 @@ static struct pci_driver pdc_ata_pci_driver = {
static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
int rc;
@ -326,7 +326,7 @@ static int pdc_port_start(struct ata_port *ap)
static void pdc_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp = ap->private_data;
ap->private_data = NULL;
@ -336,11 +336,11 @@ static void pdc_port_stop(struct ata_port *ap)
}
static void pdc_host_stop(struct ata_host_set *host_set)
static void pdc_host_stop(struct ata_host *host)
{
struct pdc_host_priv *hp = host_set->private_data;
struct pdc_host_priv *hp = host->private_data;
ata_pci_host_stop(host_set);
ata_pci_host_stop(host);
kfree(hp);
}
@ -443,14 +443,14 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
static void pdc_eng_timeout(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
u8 drv_stat;
struct ata_queued_cmd *qc;
unsigned long flags;
DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
spin_lock_irqsave(&host->lock, flags);
qc = ata_qc_from_tag(ap, ap->active_tag);
@ -473,7 +473,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
break;
}
spin_unlock_irqrestore(&host_set->lock, flags);
spin_unlock_irqrestore(&host->lock, flags);
ata_eh_qc_complete(qc);
DPRINTK("EXIT\n");
}
@ -509,15 +509,15 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
static void pdc_irq_clear(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
readl(mmio + PDC_INT_SEQMASK);
}
static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
struct ata_port *ap;
u32 mask = 0;
unsigned int i, tmp;
@ -526,12 +526,12 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
VPRINTK("ENTER\n");
if (!host_set || !host_set->mmio_base) {
if (!host || !host->mmio_base) {
VPRINTK("QUICK EXIT\n");
return IRQ_NONE;
}
mmio_base = host_set->mmio_base;
mmio_base = host->mmio_base;
/* reading should also clear interrupts */
mask = readl(mmio_base + PDC_INT_SEQMASK);
@ -541,7 +541,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
return IRQ_NONE;
}
spin_lock(&host_set->lock);
spin_lock(&host->lock);
mask &= 0xffff; /* only 16 tags possible */
if (!mask) {
@ -551,9 +551,9 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
writel(mask, mmio_base + PDC_INT_SEQMASK);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
VPRINTK("port %u\n", i);
ap = host_set->ports[i];
ap = host->ports[i];
tmp = mask & (1 << (i + 1));
if (tmp && ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
@ -568,7 +568,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
VPRINTK("EXIT\n");
done_irq:
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
@ -581,8 +581,8 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
VPRINTK("ENTER, ap %p\n", ap);
writel(0x00000001, ap->host_set->mmio_base + (seq * 4));
readl(ap->host_set->mmio_base + (seq * 4)); /* flush */
writel(0x00000001, ap->host->mmio_base + (seq * 4));
readl(ap->host->mmio_base + (seq * 4)); /* flush */
pp->pkt[2] = seq;
wmb(); /* flush PRD, pkt writes */
@ -743,7 +743,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
probe_ent->private_data = hp;
probe_ent->sht = pdc_port_info[board_idx].sht;
probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
probe_ent->port_flags = pdc_port_info[board_idx].flags;
probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;

View File

@ -116,7 +116,7 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host_set *host_set);
static void qs_host_stop(struct ata_host *host);
static void qs_port_stop(struct ata_port *ap);
static void qs_phy_reset(struct ata_port *ap);
static void qs_qc_prep(struct ata_queued_cmd *qc);
@ -174,7 +174,7 @@ static const struct ata_port_info qs_port_info[] = {
/* board_2068_idx */
{
.sht = &qs_ata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SATA_RESET |
//FIXME ATA_FLAG_SRST |
ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
@ -220,7 +220,7 @@ static void qs_irq_clear(struct ata_port *ap)
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
readb(chan + QS_CCT_CTR0); /* flush */
@ -228,7 +228,7 @@ static inline void qs_enter_reg_mode(struct ata_port *ap)
static inline void qs_reset_channel_logic(struct ata_port *ap)
{
u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
readb(chan + QS_CCT_CTR0); /* flush */
@ -342,7 +342,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
VPRINTK("ENTER, ap %p\n", ap);
@ -375,11 +375,11 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
return ata_qc_issue_prot(qc);
}
static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0;
u8 sFFE;
u8 __iomem *mmio_base = host_set->mmio_base;
u8 __iomem *mmio_base = host->mmio_base;
do {
u32 sff0 = readl(mmio_base + QS_HST_SFF);
@ -391,7 +391,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
u8 sDST = sff0 >> 16; /* dev status */
u8 sHST = sff1 & 0x3f; /* host status */
unsigned int port_no = (sff1 >> 8) & 0x03;
struct ata_port *ap = host_set->ports[port_no];
struct ata_port *ap = host->ports[port_no];
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
@ -421,13 +421,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
return handled;
}
static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
ap = host->ports[port_no];
if (ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
@ -457,14 +457,14 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int handled = 0;
VPRINTK("ENTER\n");
spin_lock(&host_set->lock);
handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
handled = qs_intr_pkt(host) | qs_intr_mmio(host);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
@ -491,9 +491,9 @@ static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
static int qs_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct qs_port_priv *pp;
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
u64 addr;
int rc;
@ -530,7 +530,7 @@ static int qs_port_start(struct ata_port *ap)
static void qs_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct qs_port_priv *pp = ap->private_data;
if (pp != NULL) {
@ -543,10 +543,10 @@ static void qs_port_stop(struct ata_port *ap)
ata_port_stop(ap);
}
static void qs_host_stop(struct ata_host_set *host_set)
static void qs_host_stop(struct ata_host *host)
{
void __iomem *mmio_base = host_set->mmio_base;
struct pci_dev *pdev = to_pci_dev(host_set->dev);
void __iomem *mmio_base = host->mmio_base;
struct pci_dev *pdev = to_pci_dev(host->dev);
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
@ -673,7 +673,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = qs_port_info[board_idx].sht;
probe_ent->host_flags = qs_port_info[board_idx].host_flags;
probe_ent->port_flags = qs_port_info[board_idx].flags;
probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;

View File

@ -56,7 +56,7 @@ enum {
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
/*
@ -218,7 +218,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3112 */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -227,7 +227,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3112_no_sata_irq */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
SIL_FLAG_NO_SATA_IRQ,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -237,7 +237,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3512 */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -246,7 +246,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3114 */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -295,10 +295,9 @@ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
static void sil_post_set_mode (struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
struct ata_device *dev;
void __iomem *addr =
host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode;
u32 tmp, dev_mode[2];
unsigned int i;
@ -440,15 +439,15 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
static irqreturn_t sil_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
void __iomem *mmio_base = host_set->mmio_base;
struct ata_host *host = dev_instance;
void __iomem *mmio_base = host->mmio_base;
int handled = 0;
int i;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
@ -466,14 +465,14 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance,
handled = 1;
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static void sil_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
u32 tmp;
/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
@ -488,7 +487,7 @@ static void sil_freeze(struct ata_port *ap)
static void sil_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
u32 tmp;
/* clear IRQ */
@ -567,7 +566,7 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
}
static void sil_init_controller(struct pci_dev *pdev,
int n_ports, unsigned long host_flags,
int n_ports, unsigned long port_flags,
void __iomem *mmio_base)
{
u8 cls;
@ -587,7 +586,7 @@ static void sil_init_controller(struct pci_dev *pdev,
"cache line size not set. Driver may not function\n");
/* Apply R_ERR on DMA activate FIS errata workaround */
if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
if (port_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
int cnt;
for (i = 0, cnt = 0; i < n_ports; i++) {
@ -658,7 +657,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
probe_ent->port_flags = sil_port_info[ent->driver_data].flags;
mmio_base = pci_iomap(pdev, 5, 0);
if (mmio_base == NULL) {
@ -679,7 +678,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ata_std_ports(&probe_ent->port[i]);
}
sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
sil_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
mmio_base);
pci_set_master(pdev);
@ -703,12 +702,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
struct ata_host *host = dev_get_drvdata(&pdev->dev);
ata_pci_device_do_resume(pdev);
sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
host_set->mmio_base);
ata_host_set_resume(host_set);
sil_init_controller(pdev, host->n_ports, host->ports[0]->flags,
host->mmio_base);
ata_host_resume(host);
return 0;
}
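
The resume paths follow the same renaming: the drvdata attached to the PCI device is now a struct ata_host, and the final call is ata_host_resume() rather than ata_host_set_resume(). A minimal sketch (foo_init_controller() is a hypothetical stand-in for the chip reinitialisation a driver does here, as sil_init_controller() above):

        static int foo_pci_device_resume(struct pci_dev *pdev)
        {
                struct ata_host *host = dev_get_drvdata(&pdev->dev);

                ata_pci_device_do_resume(pdev);
                foo_init_controller(host);      /* hypothetical chip-specific reinit */
                ata_host_resume(host);          /* was ata_host_set_resume() */

                return 0;
        }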

View File

@ -316,7 +316,7 @@ struct sil24_port_priv {
struct ata_taskfile tf; /* Cached taskfile registers */
};
/* ap->host_set->private_data */
/* ap->host->private_data */
struct sil24_host_priv {
void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
@ -337,7 +337,7 @@ static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static void sil24_port_stop(struct ata_port *ap);
static void sil24_host_stop(struct ata_host_set *host_set);
static void sil24_host_stop(struct ata_host *host);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil24_pci_device_resume(struct pci_dev *pdev);
@ -415,7 +415,7 @@ static const struct ata_port_operations sil24_ops = {
};
/*
* Use bits 30-31 of host_flags to encode available port numbers.
* Use bits 30-31 of port_flags to encode available port numbers.
* Current maximum is 4.
*/
#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
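
A quick worked example of this encoding; the inverse macro SIL24_FLAG2NPORTS() used later in the file is not shown in this hunk, so the definition below is an assumption that mirrors the one above:

        /* sketch -- SIL24_FLAG2NPORTS() definition is assumed, not from the patch */
        #define SIL24_FLAG2NPORTS(flags) ((((unsigned)(flags) >> 30) & 0x3) + 1)

        /* SIL24_NPORTS2FLAG(4) == ((4 - 1) & 0x3) << 30 == 0xc0000000 */
        /* SIL24_FLAG2NPORTS(0xc0000000) == (0x3 & 0x3) + 1 == 4       */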
@ -425,7 +425,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3124 */
{
.sht = &sil24_sht,
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
SIL24_FLAG_PCIX_IRQ_WOC,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -435,7 +435,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3132 */
{
.sht = &sil24_sht,
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -444,7 +444,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3131/sil_3531 */
{
.sht = &sil24_sht,
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -871,8 +871,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct sil24_host_priv *hpriv = host_set->private_data;
struct ata_host *host = dev_instance;
struct sil24_host_priv *hpriv = host->private_data;
unsigned handled = 0;
u32 status;
int i;
@ -888,20 +888,20 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
if (!(status & IRQ_STAT_4PORTS))
goto out;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 0; i < host_set->n_ports; i++)
for (i = 0; i < host->n_ports; i++)
if (status & (1 << i)) {
struct ata_port *ap = host_set->ports[i];
struct ata_port *ap = host->ports[i];
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
sil24_host_intr(host_set->ports[i]);
sil24_host_intr(host->ports[i]);
handled++;
} else
printk(KERN_ERR DRV_NAME
": interrupt from disabled port %d\n", i);
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
out:
return IRQ_RETVAL(handled);
}
@ -941,7 +941,7 @@ static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *de
static int sil24_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct sil24_port_priv *pp;
union sil24_cmd_block *cb;
size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
@ -980,7 +980,7 @@ static int sil24_port_start(struct ata_port *ap)
static void sil24_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct sil24_port_priv *pp = ap->private_data;
sil24_cblk_free(pp, dev);
@ -988,10 +988,10 @@ static void sil24_port_stop(struct ata_port *ap)
kfree(pp);
}
static void sil24_host_stop(struct ata_host_set *host_set)
static void sil24_host_stop(struct ata_host *host)
{
struct sil24_host_priv *hpriv = host_set->private_data;
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct sil24_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
pci_iounmap(pdev, hpriv->host_base);
pci_iounmap(pdev, hpriv->port_base);
@ -999,7 +999,7 @@ static void sil24_host_stop(struct ata_host_set *host_set)
}
static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
unsigned long host_flags,
unsigned long port_flags,
void __iomem *host_base,
void __iomem *port_base)
{
@ -1032,7 +1032,7 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
}
/* Configure IRQ WoC */
if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
if (port_flags & SIL24_FLAG_PCIX_IRQ_WOC)
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
else
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
@ -1101,12 +1101,12 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = pinfo->sht;
probe_ent->host_flags = pinfo->host_flags;
probe_ent->port_flags = pinfo->flags;
probe_ent->pio_mask = pinfo->pio_mask;
probe_ent->mwdma_mask = pinfo->mwdma_mask;
probe_ent->udma_mask = pinfo->udma_mask;
probe_ent->port_ops = pinfo->port_ops;
probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->flags);
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
@ -1144,14 +1144,14 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Apply workaround for completion IRQ loss on PCI-X errata */
if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
if (probe_ent->port_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
tmp = readl(host_base + HOST_CTRL);
if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
dev_printk(KERN_INFO, &pdev->dev,
"Applying completion IRQ loss on PCI-X "
"errata fix\n");
else
probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
probe_ent->port_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
}
for (i = 0; i < probe_ent->n_ports; i++) {
@ -1164,7 +1164,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ata_std_ports(&probe_ent->port[i]);
}
sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
host_base, port_base);
pci_set_master(pdev);
@ -1191,19 +1191,18 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PM
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
struct sil24_host_priv *hpriv = host_set->private_data;
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct sil24_host_priv *hpriv = host->private_data;
ata_pci_device_do_resume(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
sil24_init_controller(pdev, host_set->n_ports,
host_set->ports[0]->flags,
sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags,
hpriv->host_base, hpriv->port_base);
ata_host_set_resume(host_set);
ata_host_resume(host);
return 0;
}

View File

@ -128,7 +128,7 @@ static const struct ata_port_operations sis_ops = {
static struct ata_port_info sis_port_info = {
.sht = &sis_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f,
.mwdma_mask = 0x7,
.udma_mask = 0x7f,
@ -158,7 +158,7 @@ static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg,
static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
u32 val, val2 = 0;
u8 pmr;
@ -178,7 +178,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
u8 pmr;
@ -195,7 +195,7 @@ static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 val, val2 = 0;
u8 pmr;
@ -217,7 +217,7 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 pmr;
if (sc_reg > SCR_CONTROL)
@ -275,17 +275,17 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
/* check and see if the SCRs are in IO space or PCI cfg space */
pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
probe_ent->host_flags |= SIS_FLAG_CFGSCR;
probe_ent->port_flags |= SIS_FLAG_CFGSCR;
/* if hardware thinks SCRs are in IO space, but there are
* no IO resources assigned, change to PCI cfg space.
*/
if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) &&
if ((!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) &&
((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
(pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
genctl &= ~GENCTL_IOMAPPED_SCR;
pci_write_config_dword(pdev, SIS_GENCTL, genctl);
probe_ent->host_flags |= SIS_FLAG_CFGSCR;
probe_ent->port_flags |= SIS_FLAG_CFGSCR;
}
pci_read_config_byte(pdev, SIS_PMR, &pmr);
@ -306,7 +306,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
port2_start = 0x20;
}
if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) {
if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
probe_ent->port[0].scr_addr =
pci_resource_start(pdev, SIS_SCR_PCI_BAR);
probe_ent->port[1].scr_addr =

View File

@ -169,7 +169,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
@ -199,7 +199,7 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
@ -261,12 +261,12 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
return 0;
/* Find the OF node for the PCI device proper */
np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
if (np == NULL)
return 0;
/* Match it to a port node */
index = (ap == ap->host_set->ports[0]) ? 0 : 1;
index = (ap == ap->host->ports[0]) ? 0 : 1;
for (np = np->child; np != NULL; np = np->sibling) {
u32 *reg = (u32 *)get_property(np, "reg", NULL);
if (!reg)
@ -423,7 +423,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
probe_ent->sht = &k2_sata_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO;
probe_ent->port_ops = &k2_sata_ops;
probe_ent->n_ports = 4;

View File

@ -160,7 +160,7 @@ static void pdc_port_stop(struct ata_port *ap);
static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc20621_host_stop(struct ata_host_set *host_set);
static void pdc20621_host_stop(struct ata_host *host);
static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
@ -218,7 +218,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20621 */
{
.sht = &pdc_sata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO |
ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
.pio_mask = 0x1f, /* pio0-4 */
@ -244,21 +244,21 @@ static struct pci_driver pdc_sata_pci_driver = {
};
static void pdc20621_host_stop(struct ata_host_set *host_set)
static void pdc20621_host_stop(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct pdc_host_priv *hpriv = host_set->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
struct pdc_host_priv *hpriv = host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
pci_iounmap(pdev, dimm_mmio);
kfree(hpriv);
pci_iounmap(pdev, host_set->mmio_base);
pci_iounmap(pdev, host->mmio_base);
}
static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
int rc;
@ -293,7 +293,7 @@ static int pdc_port_start(struct ata_port *ap)
static void pdc_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp = ap->private_data;
ap->private_data = NULL;
@ -453,8 +453,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct pdc_host_priv *hpriv = ap->host_set->private_data;
void __iomem *mmio = ap->host->mmio_base;
struct pdc_host_priv *hpriv = ap->host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
unsigned int portno = ap->port_no;
unsigned int i, idx, total_len = 0, sgt_len;
@ -514,8 +514,8 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct pdc_host_priv *hpriv = ap->host_set->private_data;
void __iomem *mmio = ap->host->mmio_base;
struct pdc_host_priv *hpriv = ap->host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
unsigned int portno = ap->port_no;
unsigned int i;
@ -565,8 +565,8 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
u32 pkt_ofs)
{
struct ata_port *ap = qc->ap;
struct ata_host_set *host_set = ap->host_set;
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@@ -583,7 +583,7 @@ static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
u32 pkt_ofs)
{
struct ata_port *ap = qc->ap;
struct pdc_host_priv *pp = ap->host_set->private_data;
struct pdc_host_priv *pp = ap->host->private_data;
unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
if (!pp->doing_hdma) {
@@ -601,7 +601,7 @@ static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_host_priv *pp = ap->host_set->private_data;
struct pdc_host_priv *pp = ap->host->private_data;
unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
/* if nothing on queue, we're done */
@@ -620,7 +620,7 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int port_no = ap->port_no;
struct pdc_host_priv *hpriv = ap->host_set->private_data;
struct pdc_host_priv *hpriv = ap->host->private_data;
void *dimm_mmio = hpriv->dimm_mmio;
dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
@@ -638,9 +638,9 @@ static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
unsigned int port_no = ap->port_no;
void __iomem *mmio = host_set->mmio_base;
void __iomem *mmio = host->mmio_base;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 seq = (u8) (port_no + 1);
unsigned int port_ofs;
@@ -781,8 +781,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
static void pdc20621_irq_clear(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
mmio += PDC_CHIP0_OFS;
@@ -791,7 +791,7 @@ static void pdc20621_irq_clear(struct ata_port *ap)
static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
struct ata_port *ap;
u32 mask = 0;
unsigned int i, tmp, port_no;
@@ -800,12 +800,12 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
VPRINTK("ENTER\n");
if (!host_set || !host_set->mmio_base) {
if (!host || !host->mmio_base) {
VPRINTK("QUICK EXIT\n");
return IRQ_NONE;
}
mmio_base = host_set->mmio_base;
mmio_base = host->mmio_base;
/* reading should also clear interrupts */
mmio_base += PDC_CHIP0_OFS;
@@ -822,16 +822,16 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
return IRQ_NONE;
}
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 1; i < 9; i++) {
port_no = i - 1;
if (port_no > 3)
port_no -= 4;
if (port_no >= host_set->n_ports)
if (port_no >= host->n_ports)
ap = NULL;
else
ap = host_set->ports[port_no];
ap = host->ports[port_no];
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
if (tmp && ap &&
@@ -845,7 +845,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
}
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
VPRINTK("mask == 0x%x\n", mask);
@@ -857,13 +857,13 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
static void pdc_eng_timeout(struct ata_port *ap)
{
u8 drv_stat;
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
struct ata_queued_cmd *qc;
unsigned long flags;
DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
spin_lock_irqsave(&host->lock, flags);
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -885,7 +885,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
break;
}
spin_unlock_irqrestore(&host_set->lock, flags);
spin_unlock_irqrestore(&host->lock, flags);
ata_eh_qc_complete(qc);
DPRINTK("EXIT\n");
}
@@ -1429,7 +1429,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
hpriv->dimm_mmio = dimm_mmio;
probe_ent->sht = pdc_port_info[board_idx].sht;
probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
probe_ent->port_flags = pdc_port_info[board_idx].flags;
probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;

View File

@@ -128,7 +128,7 @@ static const struct ata_port_operations uli_ops = {
static struct ata_port_info uli_port_info = {
.sht = &uli_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &uli_ops,
@@ -143,13 +143,13 @@ MODULE_VERSION(DRV_VERSION);
static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
{
struct uli_priv *hpriv = ap->host_set->private_data;
struct uli_priv *hpriv = ap->host->private_data;
return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
}
static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
u32 val;
@@ -159,7 +159,7 @@ static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
pci_write_config_dword(pdev, cfg_addr, val);
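Seen from the consumer side, per-port hooks reach both the controller-wide private data and the underlying PCI device through the renamed ap->host pointer. A condensed sketch combining the two accessors above; struct my_priv and its scr_cfg_addr[] layout are assumptions standing in for this driver's uli_priv:

	static u32 my_scr_read(struct ata_port *ap, unsigned int sc_reg)
	{
		struct my_priv *priv = ap->host->private_data;	/* was ap->host_set->private_data */
		struct pci_dev *pdev = to_pci_dev(ap->host->dev);	/* was ap->host_set->dev */
		u32 val;

		pci_read_config_dword(pdev, priv->scr_cfg_addr[ap->port_no] + 4 * sc_reg, &val);
		return val;
	}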

View File

@@ -176,7 +176,7 @@ static const struct ata_port_operations vt6421_sata_ops = {
static struct ata_port_info vt6420_port_info = {
.sht = &svia_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
@@ -346,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = &svia_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
probe_ent->port_ops = &vt6421_sata_ops;
probe_ent->n_ports = N_PORTS;
probe_ent->irq = pdev->irq;

View File

@@ -123,7 +123,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
void __iomem *mask_addr;
u8 mask;
mask_addr = ap->host_set->mmio_base +
mask_addr = ap->host->mmio_base +
VSC_SATA_INT_MASK_OFFSET + ap->port_no;
mask = readb(mask_addr);
if (ctl & ATA_NIEN)
@@ -206,20 +206,20 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
u32 int_status;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET);
int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
if (int_status & ((u32) 0xFF << (8 * i))) {
struct ata_port *ap;
ap = host_set->ports[i];
ap = host->ports[i];
if (is_vsc_sata_int_err(i, int_status)) {
u32 err_status;
@@ -259,7 +259,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
}
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
@@ -395,7 +395,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
probe_ent->sht = &vsc_sata_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO;
probe_ent->port_ops = &vsc_sata_ops;
probe_ent->n_ports = 4;

View File

@@ -197,7 +197,7 @@ enum {
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
/* host set flags */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
/* various lengths of time */
ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
@@ -357,13 +357,13 @@ struct ata_probe_ent {
unsigned long irq;
unsigned long irq2;
unsigned int irq_flags;
unsigned long host_flags;
unsigned long host_set_flags;
unsigned long port_flags;
unsigned long _host_flags;
void __iomem *mmio_base;
void *private_data;
};
struct ata_host_set {
struct ata_host {
spinlock_t lock;
struct device *dev;
unsigned long irq;
@@ -420,7 +420,7 @@ struct ata_queued_cmd {
void *private_data;
};
struct ata_host_stats {
struct ata_port_stats {
unsigned long unhandled_irq;
unsigned long idle_irq;
unsigned long rw_reqbuf;
@@ -498,7 +498,7 @@ struct ata_eh_context {
};
struct ata_port {
struct Scsi_Host *host; /* our co-allocated scsi host */
struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
const struct ata_port_operations *ops;
spinlock_t *lock;
unsigned long flags; /* ATA_FLAG_xxx */
@@ -523,7 +523,7 @@ struct ata_port {
unsigned int hw_sata_spd_limit;
unsigned int sata_spd_limit; /* SATA PHY speed limit */
/* record runtime error info, protected by host_set lock */
/* record runtime error info, protected by host lock */
struct ata_eh_info eh_info;
/* EH context owned by EH */
struct ata_eh_context eh_context;
@@ -537,8 +537,8 @@ struct ata_port {
unsigned int active_tag;
u32 sactive;
struct ata_host_stats stats;
struct ata_host_set *host_set;
struct ata_port_stats stats;
struct ata_host *host;
struct device *dev;
struct work_struct port_task;
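With both renames in place, struct ata_port carries two similarly named but distinct back-pointers: ap->scsi_host is the co-allocated SCSI midlayer object, while ap->host is the libata controller. A small sketch showing the distinction (my_port_dev() is a made-up helper, not part of this patch):

	static struct device *my_port_dev(struct ata_port *ap)
	{
		struct ata_host *host = ap->host;		/* libata controller, was ap->host_set */
		struct Scsi_Host *shost = ap->scsi_host;	/* SCSI host, was ap->host */

		dev_printk(KERN_DEBUG, host->dev, "port %u -> scsi host %u\n",
			   ap->port_no, shost->host_no);
		return host->dev;
	}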
@@ -614,7 +614,7 @@ struct ata_port_operations {
int (*port_start) (struct ata_port *ap);
void (*port_stop) (struct ata_port *ap);
void (*host_stop) (struct ata_host_set *host_set);
void (*host_stop) (struct ata_host *host);
void (*bmdma_stop) (struct ata_queued_cmd *qc);
u8 (*bmdma_status) (struct ata_port *ap);
@@ -622,7 +622,7 @@ struct ata_port_operations {
struct ata_port_info {
struct scsi_host_template *sht;
unsigned long host_flags;
unsigned long flags;
unsigned long pio_mask;
unsigned long mwdma_mask;
unsigned long udma_mask;
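The per-port capability mask therefore lives in ata_port_info.flags and, for drivers that still build a probe entry by hand, is copied into probe_ent->port_flags. A sketch with placeholder tables (my_sht and my_ops are assumptions):

	static const struct ata_port_info my_port_info = {
		.sht		= &my_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,	/* was .host_flags */
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &my_ops,
	};

	/* ...and later, in the driver's init_one(): */
	probe_ent->port_flags = my_port_info.flags;	/* was ->host_flags */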
@@ -690,15 +690,15 @@ extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_port_detach(struct ata_port *ap);
extern void ata_host_set_init(struct ata_host_set *, struct device *,
unsigned long, const struct ata_port_operations *);
extern void ata_host_set_remove(struct ata_host_set *host_set);
extern void ata_host_init(struct ata_host *, struct device *,
unsigned long, const struct ata_port_operations *);
extern void ata_host_remove(struct ata_host *host);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern int ata_scsi_release(struct Scsi_Host *host);
extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host_set *,
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
@@ -715,9 +715,8 @@ extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
extern int ata_host_set_suspend(struct ata_host_set *host_set,
pm_message_t mesg);
extern void ata_host_set_resume(struct ata_host_set *host_set);
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
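The two renamed PM entry points above are called from a driver's suspend/resume hooks. A minimal sketch, assuming the host pointer is kept in the device's driver data; that lookup is an assumption, not something this header dictates:

	static int my_controller_suspend(struct device *dev, pm_message_t mesg)
	{
		struct ata_host *host = dev_get_drvdata(dev);	/* assumption: stored at probe time */

		return ata_host_suspend(host, mesg);		/* was ata_host_set_suspend() */
	}

	static int my_controller_resume(struct device *dev)
	{
		struct ata_host *host = dev_get_drvdata(dev);

		ata_host_resume(host);				/* was ata_host_set_resume() */
		return 0;
	}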
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat,
@@ -742,7 +741,7 @@ extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host_set *host_set);
extern void ata_host_stop (struct ata_host *host);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
@@ -828,7 +827,7 @@ struct pci_bits {
unsigned long val;
};
extern void ata_pci_host_stop (struct ata_host_set *host_set);
extern void ata_pci_host_stop (struct ata_host *host);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
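For drivers with no controller-private teardown of their own, the library helpers declared above slot straight into the ops table with the new prototypes. A fragment-level sketch; my_pci_ops is a placeholder, and the choice between ata_host_stop and ata_pci_host_stop depends on how the driver mapped its BARs:

	static const struct ata_port_operations my_pci_ops = {
		/* ...taskfile, exec_command and bmdma hooks elided... */
		.port_start	= ata_port_start,
		.port_stop	= ata_port_stop,
		.host_stop	= ata_pci_host_stop,	/* now void (*)(struct ata_host *) */
	};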