NXP/FSL SoC driver updates for v5.13

- Add ACPI support for RCPM driver
- Use generic io{read,write} for QE drivers after performance optimized
  for PowerPC
- Fix QBMAN probe to cleanup HW states correctly for kexec
- Various cleanup and style fix for QBMAN/QE/GUTS drivers

-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEhb3UXAyxp6UQ0v6khtxQDvusFVQFAmBvf7gACgkQhtxQDvus
FVTBLxAAgo7FWiVCMD+wnMy3nceAV4U9OZljumEKfvt7g27oMN9i/AWaWwzJ+wsF
zxMK9p0u8wCTG7PQIGbC4N1fRAaapc1w3TzDhFK4Uf5kqaoddqiFMf4IeRFmnYSF
6GDbxgxdZkvyGpYJ31Uo750UmC1Q9zmNoh17Nrs2wN12FLFlmiABokklzOuwKBxI
DLzdgbuTuBxGAv/6zSBmye391C0kf+ky6haOpyzHKToBnYJv/LyaGQGEGckfsnct
uKnigu7MjU+1l0LhftAT52UoFRR6SeyguqYbq4JAFvNJd66eNpZZpyJnGX8y7hJN
V8y4cvgeJBZl1ssVQVNA5sJSb9NCyPx0svwZqzJ3kdMWd8t8n2l4FrJSdPnjvCbz
HRwSzoMmSrQJZp+2LJATSHsHZz5ArBp+o79fLjWf/1aYO92B6MkZvdiL45ATl9uF
XOQJITdHI1rFWxXmu9rzUcgZx81LEf5jVP+L3GBrOIpPa6R0SQgXLInflVxoB+/l
SaJRp5CKjBstOvswJlA0dNyMNSn07qNjiR67Gvh7UXGvIa8w0Njv41ZYms2zvErd
6EIM1SuzrkByrq3zn+AiCNzTATgIJgU+JDq7doKwqCzBW9oBshZ9U3KKDdElqLv9
2UjMpP5dIkdbjh9vHZ+BwutuSjDwkkJrk40deck+9Q3/ZkNPvug=
=+DV0
-----END PGP SIGNATURE-----

Merge tag 'soc-fsl-next-v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers

NXP/FSL SoC driver updates for v5.13

- Add ACPI support for RCPM driver
- Use generic io{read,write} for QE drivers after performance optimized
  for PowerPC
- Fix QBMAN probe to cleanup HW states correctly for kexec
- Various cleanup and style fix for QBMAN/QE/GUTS drivers

* tag 'soc-fsl-next-v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
  soc: fsl: enable acpi support in RCPM driver
  Revert "soc: fsl: qe: introduce qe_io{read,write}* wrappers"
  tty: serial: ucc_uart: replace qe_io{read,write}* wrappers by generic io{read,write}*
  soc: fsl: qe: replace qe_io{read,write}* wrappers by generic io{read,write}*
  soc: fsl: guts: fix comment syntax in file
  soc: fsl: guts: remove unneeded semicolon
  soc: fsl: qe: Use DEFINE_SPINLOCK() for spinlock
  soc: fsl: qbman: Delete useless kfree code
  soc: fsl: qbman: Ensure device cleanup is run for kexec

Link: https://lore.kernel.org/r/20210409205719.27927-1-leoyang.li@nxp.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

commit 41c39cfc84
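
For readers unfamiliar with the io accessor change covered by the second bullet (and by the qe.h hunk at the end of this diff): the qe_io{read,write}* wrappers existed so PPC32 builds could use the native out_be32()/in_be32() accessors while other architectures used the generic helpers; once the generic big-endian MMIO helpers were optimized for PowerPC, the wrappers became redundant. A minimal illustrative sketch, assuming a made-up register block that is not part of this series:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Hypothetical QE-style register block, big-endian on the bus. */
    struct example_regs {
            __be32 ctrl;
    };

    static void example_set_enable(struct example_regs __iomem *regs, u32 bit)
    {
            u32 v;

            /*
             * Before the revert, QE code went through qe_ioread32be()/
             * qe_iowrite32be(), which expanded to in_be32()/out_be32() on
             * PPC32 and to the generic accessors elsewhere.  After it, the
             * generic big-endian MMIO helpers are used directly everywhere.
             */
            v = ioread32be(&regs->ctrl);
            iowrite32be(v | bit, &regs->ctrl);
    }
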
@@ -117,7 +117,7 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match(
                 if (matches->svr == (svr & matches->mask))
                         return matches;
                 matches++;
-        };
+        }
         return NULL;
 }
 

@@ -709,7 +709,6 @@ struct bman_pool *bman_new_pool(void)
         return pool;
 err:
         bm_release_bpid(bpid);
-        kfree(pool);
         return NULL;
 }
 EXPORT_SYMBOL(bman_new_pool);

@@ -160,7 +160,7 @@ static int bman_portal_probe(struct platform_device *pdev)
                 __bman_portals_probed = 1;
                 /* unassigned portal, skip init */
                 spin_unlock(&bman_lock);
-                return 0;
+                goto check_cleanup;
         }
 
         cpumask_set_cpu(cpu, &portal_cpus);

@@ -176,6 +176,7 @@ static int bman_portal_probe(struct platform_device *pdev)
         if (!cpu_online(cpu))
                 bman_offline_cpu(cpu);
 
+check_cleanup:
         if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
                 /*
                  * BMan wasn't reset prior to boot (Kexec for example)

@@ -302,7 +302,7 @@ static int qman_portal_probe(struct platform_device *pdev)
                 __qman_portals_probed = 1;
                 /* unassigned portal, skip init */
                 spin_unlock(&qman_lock);
-                return 0;
+                goto check_cleanup;
         }
 
         cpumask_set_cpu(cpu, &portal_cpus);

@@ -323,6 +323,7 @@ static int qman_portal_probe(struct platform_device *pdev)
         if (!cpu_online(cpu))
                 qman_offline_cpu(cpu);
 
+check_cleanup:
         if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
                 /*
                  * QMan wasn't reset prior to boot (Kexec for example)

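The BMan and QMan portal hunks above replace the early return for an unassigned portal with a jump to the new check_cleanup label, so the cleanup path still runs when the hardware was left initialized by a previous kernel (the kexec case named in the tag message). A structural sketch of that flow, with every helper hypothetical and stubbed out:

    /* Illustrative stubs; the real driver talks to QBMan portal hardware. */
    static int pick_unused_cpu(void) { return -1; }
    static void init_portal_on_cpu(int cpu) { (void)cpu; }
    static int hardware_needs_cleanup(void) { return 1; }
    static void drain_stale_portal_state(void) { }

    static int example_portal_probe(void)
    {
            int cpu = pick_unused_cpu();

            if (cpu < 0)
                    /* unassigned portal: skip init but still run cleanup */
                    goto check_cleanup;

            init_portal_on_cpu(cpu);

    check_cleanup:
            if (hardware_needs_cleanup())
                    drain_stale_portal_state();
            return 0;
    }
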
@@ -41,13 +41,13 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
                 container_of(mm_gc, struct qe_gpio_chip, mm_gc);
         struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
-        qe_gc->cpdata = qe_ioread32be(&regs->cpdata);
+        qe_gc->cpdata = ioread32be(&regs->cpdata);
         qe_gc->saved_regs.cpdata = qe_gc->cpdata;
-        qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1);
-        qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2);
-        qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1);
-        qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2);
-        qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr);
+        qe_gc->saved_regs.cpdir1 = ioread32be(&regs->cpdir1);
+        qe_gc->saved_regs.cpdir2 = ioread32be(&regs->cpdir2);
+        qe_gc->saved_regs.cppar1 = ioread32be(&regs->cppar1);
+        qe_gc->saved_regs.cppar2 = ioread32be(&regs->cppar2);
+        qe_gc->saved_regs.cpodr = ioread32be(&regs->cpodr);
 }
 
 static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)

@@ -56,7 +56,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
         struct qe_pio_regs __iomem *regs = mm_gc->regs;
         u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
 
-        return !!(qe_ioread32be(&regs->cpdata) & pin_mask);
+        return !!(ioread32be(&regs->cpdata) & pin_mask);
 }
 
 static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)

@@ -74,7 +74,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
         else
                 qe_gc->cpdata &= ~pin_mask;
 
-        qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+        iowrite32be(qe_gc->cpdata, &regs->cpdata);
 
         spin_unlock_irqrestore(&qe_gc->lock, flags);
 }

@@ -101,7 +101,7 @@ static void qe_gpio_set_multiple(struct gpio_chip *gc,
                 }
         }
 
-        qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+        iowrite32be(qe_gc->cpdata, &regs->cpdata);
 
         spin_unlock_irqrestore(&qe_gc->lock, flags);
 }

@@ -269,7 +269,7 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
         else
                 qe_gc->cpdata &= ~mask1;
 
-        qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+        iowrite32be(qe_gc->cpdata, &regs->cpdata);
         qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
 
         spin_unlock_irqrestore(&qe_gc->lock, flags);

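The GPIO hunks above all follow the same shadow-register pattern: the driver keeps a cached copy of the data register, updates the cache under a spinlock, and writes the whole word back with a single big-endian MMIO store. A self-contained sketch of that pattern, with a made-up structure that is not the driver's:

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Illustrative shadow-register layout, modelled on qe_gpio_set() above. */
    struct example_gpio_chip {
            spinlock_t lock;
            u32 cpdata;                     /* cached copy of the data register */
            void __iomem *cpdata_reg;
    };

    static void example_gpio_set(struct example_gpio_chip *gc, u32 pin_mask, int val)
    {
            unsigned long flags;

            spin_lock_irqsave(&gc->lock, flags);

            if (val)
                    gc->cpdata |= pin_mask;
            else
                    gc->cpdata &= ~pin_mask;

            /* write the whole shadowed word back as big-endian MMIO */
            iowrite32be(gc->cpdata, gc->cpdata_reg);

            spin_unlock_irqrestore(&gc->lock, flags);
    }
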
@@ -109,7 +109,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
 
         spin_lock_irqsave(&qe_lock, flags);
         if (cmd == QE_RESET) {
-                qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
+                iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
         } else {
                 if (cmd == QE_ASSIGN_PAGE) {
                         /* Here device is the SNUM, not sub-block */

@@ -126,13 +126,13 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
                         mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
                 }
 
-                qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr);
-                qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+                iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+                iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
                             &qe_immr->cp.cecr);
         }
 
         /* wait for the QE_CR_FLG to clear */
-        ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val,
+        ret = readx_poll_timeout_atomic(ioread32be, &qe_immr->cp.cecr, val,
                                         (val & QE_CR_FLG) == 0, 0, 100);
         /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
         spin_unlock_irqrestore(&qe_lock, flags);

@@ -231,7 +231,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
         tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
                   QE_BRGC_ENABLE | div16;
 
-        qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
+        iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
 
         return 0;
 }

@@ -375,9 +375,9 @@ static int qe_sdma_init(void)
                 return -ENOMEM;
         }
 
-        qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+        iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
                        &sdma->sdebcr);
-        qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+        iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
                        &sdma->sdmr);
 
         return 0;

@@ -416,14 +416,14 @@ static void qe_upload_microcode(const void *base,
                  "uploading microcode '%s'\n", ucode->id);
 
         /* Use auto-increment */
-        qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+        iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
                        &qe_immr->iram.iadd);
 
         for (i = 0; i < be32_to_cpu(ucode->count); i++)
-                qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
+                iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
 
         /* Set I-RAM Ready Register */
-        qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
+        iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
 }
 
 /*

@@ -542,12 +542,12 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
                         u32 trap = be32_to_cpu(ucode->traps[j]);
 
                         if (trap)
-                                qe_iowrite32be(trap,
+                                iowrite32be(trap,
                                             &qe_immr->rsp[i].tibcr[j]);
                 }
 
                 /* Enable traps */
-                qe_iowrite32be(be32_to_cpu(ucode->eccr),
+                iowrite32be(be32_to_cpu(ucode->eccr),
                             &qe_immr->rsp[i].eccr);
         }
 

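The qe_issue_cmd() hunk keeps the existing readx_poll_timeout_atomic() call and only swaps the read operation to the generic ioread32be(). A stand-alone example of that polling idiom from <linux/iopoll.h>, with the register pointer, flag value and timeout invented for illustration:

    #include <linux/io.h>
    #include <linux/iopoll.h>
    #include <linux/types.h>

    #define EXAMPLE_BUSY_FLAG 0x00010000    /* stand-in for QE_CR_FLG */

    /*
     * Poll a big-endian command register until the busy flag clears.
     * Spins (no sleeping) for up to 100 us with no delay between reads;
     * returns 0 on success or -ETIMEDOUT on timeout, like the call above.
     */
    static int example_wait_cmd_done(void __iomem *cmd_reg)
    {
            u32 val;

            return readx_poll_timeout_atomic(ioread32be, cmd_reg, val,
                                             (val & EXAMPLE_BUSY_FLAG) == 0,
                                             0, 100);
    }
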
@@ -26,7 +26,7 @@
 #include <soc/fsl/qe/qe.h>
 
 static struct gen_pool *muram_pool;
-static spinlock_t cpm_muram_lock;
+static DEFINE_SPINLOCK(cpm_muram_lock);
 static void __iomem *muram_vbase;
 static phys_addr_t muram_pbase;
 

@@ -54,7 +54,6 @@ int cpm_muram_init(void)
         if (muram_pbase)
                 return 0;
 
-        spin_lock_init(&cpm_muram_lock);
         np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
         if (!np) {
                 /* try legacy bindings */

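The qe_common.c change above replaces a runtime spin_lock_init() with a static DEFINE_SPINLOCK() initializer, so the lock is valid from the moment the object file is loaded. A minimal sketch of the same idiom outside this driver:

    #include <linux/spinlock.h>

    /* Declared and initialised in one step; no spin_lock_init() needed. */
    static DEFINE_SPINLOCK(example_lock);
    static unsigned int example_counter;

    static void example_bump(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            example_counter++;
            spin_unlock_irqrestore(&example_lock, flags);
    }
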
@@ -222,13 +222,13 @@ static struct qe_ic_info qe_ic_info[] = {
 
 static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
 {
-        return qe_ioread32be(base + (reg >> 2));
+        return ioread32be(base + (reg >> 2));
 }
 
 static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
                                u32 value)
 {
-        qe_iowrite32be(value, base + (reg >> 2));
+        iowrite32be(value, base + (reg >> 2));
 }
 
 static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)

@@ -54,16 +54,16 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
         pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
 
         /* Set open drain, if required */
-        tmp_val = qe_ioread32be(&par_io->cpodr);
+        tmp_val = ioread32be(&par_io->cpodr);
         if (open_drain)
-                qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
+                iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
         else
-                qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
+                iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
 
         /* define direction */
         tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-                qe_ioread32be(&par_io->cpdir2) :
-                qe_ioread32be(&par_io->cpdir1);
+                ioread32be(&par_io->cpdir2) :
+                ioread32be(&par_io->cpdir1);
 
         /* get all bits mask for 2 bit per port */
         pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -

@@ -75,30 +75,30 @@ void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
 
         /* clear and set 2 bits mask */
         if (pin > (QE_PIO_PINS / 2) - 1) {
-                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
+                iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
                 tmp_val &= ~pin_mask2bits;
-                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
+                iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
         } else {
-                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
+                iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
                 tmp_val &= ~pin_mask2bits;
-                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
+                iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
         }
         /* define pin assignment */
         tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
-                qe_ioread32be(&par_io->cppar2) :
-                qe_ioread32be(&par_io->cppar1);
+                ioread32be(&par_io->cppar2) :
+                ioread32be(&par_io->cppar1);
 
         new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
                         (pin % (QE_PIO_PINS / 2) + 1) * 2));
         /* clear and set 2 bits mask */
         if (pin > (QE_PIO_PINS / 2) - 1) {
-                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
+                iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
                 tmp_val &= ~pin_mask2bits;
-                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
+                iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
         } else {
-                qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
+                iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
                 tmp_val &= ~pin_mask2bits;
-                qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
+                iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
         }
 }
 EXPORT_SYMBOL(__par_io_config_pin);

@@ -126,12 +126,12 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
         /* calculate pin location */
         pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
 
-        tmp_val = qe_ioread32be(&par_io[port].cpdata);
+        tmp_val = ioread32be(&par_io[port].cpdata);
 
         if (val == 0) /* clear */
-                qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
+                iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
         else /* set */
-                qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
+                iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
 
         return 0;
 }

@@ -29,42 +29,42 @@ void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
         printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
 
         printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
+               &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
         printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
+               &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
         printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
+               &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
         printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
+               &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
         printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
+               &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
         printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
+               &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
         printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
-               &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
+               &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
         printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
+               &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
         printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
+               &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
         printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
+               &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
         printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
                &uccf->uf_regs->urfset,
-               qe_ioread16be(&uccf->uf_regs->urfset));
+               ioread16be(&uccf->uf_regs->urfset));
         printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
+               &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
         printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
+               &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
         printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
+               &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
         printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
+               &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
         printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
-               &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
+               &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
         printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
-               &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
+               &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
         printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
-               &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
+               &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
 }
 EXPORT_SYMBOL(ucc_fast_dump_regs);
 

@@ -86,7 +86,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
 
 void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
 {
-        qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
+        iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
 }
 EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
 

@@ -98,7 +98,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
         uf_regs = uccf->uf_regs;
 
         /* Enable reception and/or transmission on this UCC. */
-        gumr = qe_ioread32be(&uf_regs->gumr);
+        gumr = ioread32be(&uf_regs->gumr);
         if (mode & COMM_DIR_TX) {
                 gumr |= UCC_FAST_GUMR_ENT;
                 uccf->enabled_tx = 1;

@@ -107,7 +107,7 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
                 gumr |= UCC_FAST_GUMR_ENR;
                 uccf->enabled_rx = 1;
         }
-        qe_iowrite32be(gumr, &uf_regs->gumr);
+        iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_enable);
 

@@ -119,7 +119,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
         uf_regs = uccf->uf_regs;
 
         /* Disable reception and/or transmission on this UCC. */
-        gumr = qe_ioread32be(&uf_regs->gumr);
+        gumr = ioread32be(&uf_regs->gumr);
         if (mode & COMM_DIR_TX) {
                 gumr &= ~UCC_FAST_GUMR_ENT;
                 uccf->enabled_tx = 0;

@@ -128,7 +128,7 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
                 gumr &= ~UCC_FAST_GUMR_ENR;
                 uccf->enabled_rx = 0;
         }
-        qe_iowrite32be(gumr, &uf_regs->gumr);
+        iowrite32be(gumr, &uf_regs->gumr);
 }
 EXPORT_SYMBOL(ucc_fast_disable);
 

@@ -262,7 +262,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         gumr |= uf_info->tenc;
         gumr |= uf_info->tcrc;
         gumr |= uf_info->mode;
-        qe_iowrite32be(gumr, &uf_regs->gumr);
+        iowrite32be(gumr, &uf_regs->gumr);
 
         /* Allocate memory for Tx Virtual Fifo */
         uccf->ucc_fast_tx_virtual_fifo_base_offset =

@@ -287,16 +287,16 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         }
 
         /* Set Virtual Fifo registers */
-        qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
-        qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
-        qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
-        qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
-        qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
-        qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
+        iowrite16be(uf_info->urfs, &uf_regs->urfs);
+        iowrite16be(uf_info->urfet, &uf_regs->urfet);
+        iowrite16be(uf_info->urfset, &uf_regs->urfset);
+        iowrite16be(uf_info->utfs, &uf_regs->utfs);
+        iowrite16be(uf_info->utfet, &uf_regs->utfet);
+        iowrite16be(uf_info->utftt, &uf_regs->utftt);
         /* utfb, urfb are offsets from MURAM base */
-        qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+        iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
                        &uf_regs->utfb);
-        qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+        iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
                        &uf_regs->urfb);
 
         /* Mux clocking */

@@ -365,14 +365,14 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         }
 
         /* Set interrupt mask register at UCC level. */
-        qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+        iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
 
         /* First, clear anything pending at UCC level,
          * otherwise, old garbage may come through
          * as soon as the dam is opened. */
 
         /* Writing '1' clears */
-        qe_iowrite32be(0xffffffff, &uf_regs->ucce);
+        iowrite32be(0xffffffff, &uf_regs->ucce);
 
         *uccf_ret = uccf;
         return 0;

@@ -78,7 +78,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
         us_regs = uccs->us_regs;
 
         /* Enable reception and/or transmission on this UCC. */
-        gumr_l = qe_ioread32be(&us_regs->gumr_l);
+        gumr_l = ioread32be(&us_regs->gumr_l);
         if (mode & COMM_DIR_TX) {
                 gumr_l |= UCC_SLOW_GUMR_L_ENT;
                 uccs->enabled_tx = 1;

@@ -87,7 +87,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
                 gumr_l |= UCC_SLOW_GUMR_L_ENR;
                 uccs->enabled_rx = 1;
         }
-        qe_iowrite32be(gumr_l, &us_regs->gumr_l);
+        iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_enable);
 

@@ -99,7 +99,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
         us_regs = uccs->us_regs;
 
         /* Disable reception and/or transmission on this UCC. */
-        gumr_l = qe_ioread32be(&us_regs->gumr_l);
+        gumr_l = ioread32be(&us_regs->gumr_l);
         if (mode & COMM_DIR_TX) {
                 gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
                 uccs->enabled_tx = 0;

@@ -108,7 +108,7 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
                 gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
                 uccs->enabled_rx = 0;
         }
-        qe_iowrite32be(gumr_l, &us_regs->gumr_l);
+        iowrite32be(gumr_l, &us_regs->gumr_l);
 }
 EXPORT_SYMBOL(ucc_slow_disable);
 

@@ -194,7 +194,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 return ret;
         }
 
-        qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
+        iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
 
         INIT_LIST_HEAD(&uccs->confQ);
 

@@ -222,27 +222,27 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
         for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
                 /* clear bd buffer */
-                qe_iowrite32be(0, &bd->buf);
+                iowrite32be(0, &bd->buf);
                 /* set bd status and length */
-                qe_iowrite32be(0, (u32 __iomem *)bd);
+                iowrite32be(0, (u32 __iomem *)bd);
                 bd++;
         }
         /* for last BD set Wrap bit */
-        qe_iowrite32be(0, &bd->buf);
-        qe_iowrite32be(T_W, (u32 __iomem *)bd);
+        iowrite32be(0, &bd->buf);
+        iowrite32be(T_W, (u32 __iomem *)bd);
 
         /* Init Rx bds */
         bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
         for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
                 /* set bd status and length */
-                qe_iowrite32be(0, (u32 __iomem *)bd);
+                iowrite32be(0, (u32 __iomem *)bd);
                 /* clear bd buffer */
-                qe_iowrite32be(0, &bd->buf);
+                iowrite32be(0, &bd->buf);
                 bd++;
         }
         /* for last BD set Wrap bit */
-        qe_iowrite32be(R_W, (u32 __iomem *)bd);
-        qe_iowrite32be(0, &bd->buf);
+        iowrite32be(R_W, (u32 __iomem *)bd);
+        iowrite32be(0, &bd->buf);
 
         /* Set GUMR (For more details see the hardware spec.). */
         /* gumr_h */

@@ -263,7 +263,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 gumr |= UCC_SLOW_GUMR_H_TXSY;
         if (us_info->rtsm)
                 gumr |= UCC_SLOW_GUMR_H_RTSM;
-        qe_iowrite32be(gumr, &us_regs->gumr_h);
+        iowrite32be(gumr, &us_regs->gumr_h);
 
         /* gumr_l */
         gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |

@@ -276,18 +276,18 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 gumr |= UCC_SLOW_GUMR_L_TINV;
         if (us_info->tend)
                 gumr |= UCC_SLOW_GUMR_L_TEND;
-        qe_iowrite32be(gumr, &us_regs->gumr_l);
+        iowrite32be(gumr, &us_regs->gumr_l);
 
         /* Function code registers */
 
         /* if the data is in cachable memory, the 'global' */
         /* in the function code should be set. */
-        qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
-        qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
+        iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+        iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
 
         /* rbase, tbase are offsets from MURAM base */
-        qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
-        qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
+        iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+        iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
 
         /* Mux clocking */
         /* Grant Support */

@@ -317,14 +317,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         }
 
         /* Set interrupt mask register at UCC level. */
-        qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
+        iowrite16be(us_info->uccm_mask, &us_regs->uccm);
 
         /* First, clear anything pending at UCC level,
          * otherwise, old garbage may come through
          * as soon as the dam is opened. */
 
         /* Writing '1' clears */
-        qe_iowrite16be(0xffff, &us_regs->ucce);
+        iowrite16be(0xffff, &us_regs->ucce);
 
         /* Issue QE Init command */
         if (us_info->init_tx && us_info->init_rx)

@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <linux/kernel.h>
+#include <linux/acpi.h>
 
 #define RCPM_WAKEUP_CELL_MAX_SIZE 7
 

@@ -78,10 +79,20 @@ static int rcpm_pm_prepare(struct device *dev)
                                 "fsl,rcpm-wakeup", value,
                                 rcpm->wakeup_cells + 1);
 
-                /* Wakeup source should refer to current rcpm device */
-                if (ret || (np->phandle != value[0]))
+                if (ret)
                         continue;
 
+                /*
+                 * For DT mode, would handle devices with "fsl,rcpm-wakeup"
+                 * pointing to the current RCPM node.
+                 *
+                 * For ACPI mode, currently we assume there is only one
+                 * RCPM controller existing.
+                 */
+                if (is_of_node(dev->fwnode))
+                        if (np->phandle != value[0])
+                                continue;
+
                 /* Property "#fsl,rcpm-wakeup-cells" of rcpm node defines the
                  * number of IPPDEXPCR register cells, and "fsl,rcpm-wakeup"
                  * of wakeup source IP contains an integer array: <phandle to

@@ -172,10 +183,19 @@ static const struct of_device_id rcpm_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, rcpm_of_match);
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rcpm_acpi_ids[] = {
+        {"NXP0015",},
+        { }
+};
+MODULE_DEVICE_TABLE(acpi, rcpm_acpi_ids);
+#endif
+
 static struct platform_driver rcpm_driver = {
         .driver = {
                 .name = "rcpm",
                 .of_match_table = rcpm_of_match,
+                .acpi_match_table = ACPI_PTR(rcpm_acpi_ids),
                 .pm = &rcpm_pm_ops,
         },
         .probe = rcpm_probe,

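The RCPM hunks above add an ACPI ID table and hook it up through acpi_match_table while keeping the existing device-tree match table; rcpm_pm_prepare() then uses is_of_node() to keep the phandle check DT-only. A skeleton of that dual OF/ACPI matching pattern, where the driver name, compatible string and ACPI ID are placeholders and probe() is a stub:

    #include <linux/acpi.h>
    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            /*
             * is_of_node(pdev->dev.fwnode) can gate DT-only paths here,
             * as rcpm_pm_prepare() does for its phandle comparison.
             */
            return 0;
    }

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example" },
            { }
    };
    MODULE_DEVICE_TABLE(of, example_of_match);

    #ifdef CONFIG_ACPI
    static const struct acpi_device_id example_acpi_ids[] = {
            { "EXMPL001", },
            { }
    };
    MODULE_DEVICE_TABLE(acpi, example_acpi_ids);
    #endif

    static struct platform_driver example_driver = {
            .driver = {
                    .name = "example",
                    .of_match_table = example_of_match,
                    /* ACPI_PTR() compiles to NULL when CONFIG_ACPI is off */
                    .acpi_match_table = ACPI_PTR(example_acpi_ids),
            },
            .probe = example_probe,
    };
    module_platform_driver(example_driver);
    MODULE_LICENSE("GPL");
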
@@ -261,11 +261,11 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
         struct qe_bd *bdp = qe_port->tx_bd_base;
 
         while (1) {
-                if (qe_ioread16be(&bdp->status) & BD_SC_READY)
+                if (ioread16be(&bdp->status) & BD_SC_READY)
                         /* This BD is not done, so return "not done" */
                         return 0;
 
-                if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+                if (ioread16be(&bdp->status) & BD_SC_WRAP)
                         /*
                          * This BD is done and it's the last one, so return
                          * "done"

@@ -344,10 +344,10 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
                 p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
 
                 *p++ = port->x_char;
-                qe_iowrite16be(1, &bdp->length);
+                iowrite16be(1, &bdp->length);
                 qe_setbits_be16(&bdp->status, BD_SC_READY);
                 /* Get next BD. */
-                if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+                if (ioread16be(&bdp->status) & BD_SC_WRAP)
                         bdp = qe_port->tx_bd_base;
                 else
                         bdp++;

@@ -366,7 +366,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
         /* Pick next descriptor and fill from buffer */
         bdp = qe_port->tx_cur;
 
-        while (!(qe_ioread16be(&bdp->status) & BD_SC_READY) &&
+        while (!(ioread16be(&bdp->status) & BD_SC_READY) &&
                (xmit->tail != xmit->head)) {
                 count = 0;
                 p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);

@@ -379,11 +379,11 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
                                 break;
                 }
 
-                qe_iowrite16be(count, &bdp->length);
+                iowrite16be(count, &bdp->length);
                 qe_setbits_be16(&bdp->status, BD_SC_READY);
 
                 /* Get next BD. */
-                if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+                if (ioread16be(&bdp->status) & BD_SC_WRAP)
                         bdp = qe_port->tx_bd_base;
                 else
                         bdp++;

@@ -416,7 +416,7 @@ static void qe_uart_start_tx(struct uart_port *port)
                 container_of(port, struct uart_qe_port, port);
 
         /* If we currently are transmitting, then just return */
-        if (qe_ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
+        if (ioread16be(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
                 return;
 
         /* Otherwise, pump the port and start transmission */

@@ -471,14 +471,14 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
          */
         bdp = qe_port->rx_cur;
         while (1) {
-                status = qe_ioread16be(&bdp->status);
+                status = ioread16be(&bdp->status);
 
                 /* If this one is empty, then we assume we've read them all */
                 if (status & BD_SC_EMPTY)
                         break;
 
                 /* get number of characters, and check space in RX buffer */
-                i = qe_ioread16be(&bdp->length);
+                i = ioread16be(&bdp->length);
 
                 /* If we don't have enough room in RX buffer for the entire BD,
                  * then we try later, which will be the next RX interrupt.

@@ -512,7 +512,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
                 qe_clrsetbits_be16(&bdp->status,
                                    BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID,
                                    BD_SC_EMPTY);
-                if (qe_ioread16be(&bdp->status) & BD_SC_WRAP)
+                if (ioread16be(&bdp->status) & BD_SC_WRAP)
                         bdp = qe_port->rx_bd_base;
                 else
                         bdp++;

@@ -569,8 +569,8 @@ static irqreturn_t qe_uart_int(int irq, void *data)
         u16 events;
 
         /* Clear the interrupts */
-        events = qe_ioread16be(&uccp->ucce);
-        qe_iowrite16be(events, &uccp->ucce);
+        events = ioread16be(&uccp->ucce);
+        iowrite16be(events, &uccp->ucce);
 
         if (events & UCC_UART_UCCE_BRKE)
                 uart_handle_break(&qe_port->port);

@@ -601,17 +601,17 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
         bdp = qe_port->rx_bd_base;
         qe_port->rx_cur = qe_port->rx_bd_base;
         for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) {
-                qe_iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
-                qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
-                qe_iowrite16be(0, &bdp->length);
+                iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+                iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+                iowrite16be(0, &bdp->length);
                 bd_virt += qe_port->rx_fifosize;
                 bdp++;
         }
 
         /* */
-        qe_iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
-        qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
-        qe_iowrite16be(0, &bdp->length);
+        iowrite16be(BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT, &bdp->status);
+        iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+        iowrite16be(0, &bdp->length);
 
         /* Set the physical address of the host memory
          * buffers in the buffer descriptors, and the

@@ -622,9 +622,9 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
         qe_port->tx_cur = qe_port->tx_bd_base;
         bdp = qe_port->tx_bd_base;
         for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) {
-                qe_iowrite16be(BD_SC_INTRPT, &bdp->status);
-                qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
-                qe_iowrite16be(0, &bdp->length);
+                iowrite16be(BD_SC_INTRPT, &bdp->status);
+                iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+                iowrite16be(0, &bdp->length);
                 bd_virt += qe_port->tx_fifosize;
                 bdp++;
         }

@@ -634,9 +634,9 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
         qe_setbits_be16(&qe_port->tx_cur->status, BD_SC_P);
 #endif
 
-        qe_iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status);
-        qe_iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
-        qe_iowrite16be(0, &bdp->length);
+        iowrite16be(BD_SC_WRAP | BD_SC_INTRPT, &bdp->status);
+        iowrite32be(cpu2qe_addr(bd_virt, qe_port), &bdp->buf);
+        iowrite16be(0, &bdp->length);
 }
 
 /*

@@ -658,21 +658,21 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
         ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
 
         /* Program the UCC UART parameter RAM */
-        qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr);
-        qe_iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr);
-        qe_iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr);
-        qe_iowrite16be(0x10, &uccup->maxidl);
-        qe_iowrite16be(1, &uccup->brkcr);
-        qe_iowrite16be(0, &uccup->parec);
-        qe_iowrite16be(0, &uccup->frmec);
-        qe_iowrite16be(0, &uccup->nosec);
-        qe_iowrite16be(0, &uccup->brkec);
-        qe_iowrite16be(0, &uccup->uaddr[0]);
-        qe_iowrite16be(0, &uccup->uaddr[1]);
-        qe_iowrite16be(0, &uccup->toseq);
+        iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.rbmr);
+        iowrite8(UCC_BMR_GBL | UCC_BMR_BO_BE, &uccup->common.tbmr);
+        iowrite16be(qe_port->rx_fifosize, &uccup->common.mrblr);
+        iowrite16be(0x10, &uccup->maxidl);
+        iowrite16be(1, &uccup->brkcr);
+        iowrite16be(0, &uccup->parec);
+        iowrite16be(0, &uccup->frmec);
+        iowrite16be(0, &uccup->nosec);
+        iowrite16be(0, &uccup->brkec);
+        iowrite16be(0, &uccup->uaddr[0]);
+        iowrite16be(0, &uccup->uaddr[1]);
+        iowrite16be(0, &uccup->toseq);
         for (i = 0; i < 8; i++)
-                qe_iowrite16be(0xC000, &uccup->cchars[i]);
-        qe_iowrite16be(0xc0ff, &uccup->rccm);
+                iowrite16be(0xC000, &uccup->cchars[i]);
+        iowrite16be(0xc0ff, &uccup->rccm);
 
         /* Configure the GUMR registers for UART */
         if (soft_uart) {

@@ -702,30 +702,30 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
 #endif
 
         /* Disable rx interrupts and clear all pending events. */
-        qe_iowrite16be(0, &uccp->uccm);
-        qe_iowrite16be(0xffff, &uccp->ucce);
-        qe_iowrite16be(0x7e7e, &uccp->udsr);
+        iowrite16be(0, &uccp->uccm);
+        iowrite16be(0xffff, &uccp->ucce);
+        iowrite16be(0x7e7e, &uccp->udsr);
 
         /* Initialize UPSMR */
-        qe_iowrite16be(0, &uccp->upsmr);
+        iowrite16be(0, &uccp->upsmr);
 
         if (soft_uart) {
-                qe_iowrite16be(0x30, &uccup->supsmr);
-                qe_iowrite16be(0, &uccup->res92);
-                qe_iowrite32be(0, &uccup->rx_state);
-                qe_iowrite32be(0, &uccup->rx_cnt);
-                qe_iowrite8(0, &uccup->rx_bitmark);
-                qe_iowrite8(10, &uccup->rx_length);
-                qe_iowrite32be(0x4000, &uccup->dump_ptr);
-                qe_iowrite8(0, &uccup->rx_temp_dlst_qe);
-                qe_iowrite32be(0, &uccup->rx_frame_rem);
-                qe_iowrite8(0, &uccup->rx_frame_rem_size);
+                iowrite16be(0x30, &uccup->supsmr);
+                iowrite16be(0, &uccup->res92);
+                iowrite32be(0, &uccup->rx_state);
+                iowrite32be(0, &uccup->rx_cnt);
+                iowrite8(0, &uccup->rx_bitmark);
+                iowrite8(10, &uccup->rx_length);
+                iowrite32be(0x4000, &uccup->dump_ptr);
+                iowrite8(0, &uccup->rx_temp_dlst_qe);
+                iowrite32be(0, &uccup->rx_frame_rem);
+                iowrite8(0, &uccup->rx_frame_rem_size);
                 /* Soft-UART requires TX to be 1X */
-                qe_iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1,
+                iowrite8(UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1,
                             &uccup->tx_mode);
-                qe_iowrite16be(0, &uccup->tx_state);
-                qe_iowrite8(0, &uccup->resD4);
-                qe_iowrite16be(0, &uccup->resD5);
+                iowrite16be(0, &uccup->tx_state);
+                iowrite8(0, &uccup->resD4);
+                iowrite16be(0, &uccup->resD5);
 
         /* Set UART mode.
          * Enable receive and transmit.

@@ -850,9 +850,9 @@ static void qe_uart_set_termios(struct uart_port *port,
         struct ucc_slow __iomem *uccp = qe_port->uccp;
         unsigned int baud;
         unsigned long flags;
-        u16 upsmr = qe_ioread16be(&uccp->upsmr);
+        u16 upsmr = ioread16be(&uccp->upsmr);
         struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
-        u16 supsmr = qe_ioread16be(&uccup->supsmr);
+        u16 supsmr = ioread16be(&uccup->supsmr);
         u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
 
         /* Character length programmed into the mode register is the

@@ -950,10 +950,10 @@ static void qe_uart_set_termios(struct uart_port *port,
         /* Update the per-port timeout. */
         uart_update_timeout(port, termios->c_cflag, baud);
 
-        qe_iowrite16be(upsmr, &uccp->upsmr);
+        iowrite16be(upsmr, &uccp->upsmr);
         if (soft_uart) {
-                qe_iowrite16be(supsmr, &uccup->supsmr);
-                qe_iowrite8(char_length, &uccup->rx_length);
+                iowrite16be(supsmr, &uccup->supsmr);
+                iowrite8(char_length, &uccup->rx_length);
 
                 /* Soft-UART requires a 1X multiplier for TX */
                 qe_setbrg(qe_port->us_info.rx_clock, baud, 16);

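Many of the ucc_uart.c hunks above read a buffer descriptor's status word with ioread16be() and use the wrap bit to know when to rewind to the start of the BD ring. A reduced, self-contained sketch of that ring walk, with a structure layout and bit values that are illustrative rather than the driver's:

    #include <linux/io.h>
    #include <linux/types.h>

    #define EXAMPLE_BD_WRAP   0x2000        /* stand-in for BD_SC_WRAP */
    #define EXAMPLE_BD_READY  0x8000        /* stand-in for BD_SC_READY */

    struct example_bd {
            __be16 status;
            __be16 length;
            __be32 buf;
    };

    /*
     * Count descriptors no longer owned by the hardware, stopping at the
     * first busy one; mirrors the ring-walk shape of qe_uart_tx_empty().
     */
    static unsigned int example_count_done(struct example_bd __iomem *ring)
    {
            struct example_bd __iomem *bdp = ring;
            unsigned int done = 0;

            while (!(ioread16be(&bdp->status) & EXAMPLE_BD_READY)) {
                    done++;
                    if (ioread16be(&bdp->status) & EXAMPLE_BD_WRAP)
                            break;          /* last descriptor in the ring */
                    bdp++;
            }
            return done;
    }
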
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * Freecale 85xx and 86xx Global Utilties register set
  *
  * Authors: Jeff Brown

@@ -14,7 +14,7 @@
 #include <linux/types.h>
 #include <linux/io.h>
 
-/**
+/*
  * Global Utility Registers.
  *
  * Not all registers defined in this structure are available on all chips, so

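The two guts hunks above drop the extra asterisk from the comment openers because a double-star opener marks a comment as kernel-doc, and these blocks are not written in kernel-doc format. A small illustrative contrast, with invented functions:

    /**
     * example_add() - add two integers (kernel-doc: double-star opener)
     * @a: first addend
     * @b: second addend
     *
     * Return: sum of @a and @b.
     */
    static inline int example_add(int a, int b)
    {
            return a + b;
    }

    /*
     * Plain comment: single-star opener, ignored by the kernel-doc tooling.
     * This is the form the guts comments are converted to above.
     */
    static inline int example_sub(int a, int b)
    {
            return a - b;
    }
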
@@ -239,37 +239,21 @@ static inline int qe_alive_during_sleep(void)
 #define qe_muram_dma cpm_muram_dma
 #define qe_muram_free_addr cpm_muram_free_addr
 
-#ifdef CONFIG_PPC32
-#define qe_iowrite8(val, addr) out_8(addr, val)
-#define qe_iowrite16be(val, addr) out_be16(addr, val)
-#define qe_iowrite32be(val, addr) out_be32(addr, val)
-#define qe_ioread8(addr) in_8(addr)
-#define qe_ioread16be(addr) in_be16(addr)
-#define qe_ioread32be(addr) in_be32(addr)
-#else
-#define qe_iowrite8(val, addr) iowrite8(val, addr)
-#define qe_iowrite16be(val, addr) iowrite16be(val, addr)
-#define qe_iowrite32be(val, addr) iowrite32be(val, addr)
-#define qe_ioread8(addr) ioread8(addr)
-#define qe_ioread16be(addr) ioread16be(addr)
-#define qe_ioread32be(addr) ioread32be(addr)
-#endif
-
-#define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) | (_v), (_addr))
-#define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define qe_clrbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
 
-#define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) | (_v), (_addr))
-#define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define qe_clrbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
 
-#define qe_setbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) | (_v), (_addr))
-#define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr))
+#define qe_setbits_8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define qe_clrbits_8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
 
 #define qe_clrsetbits_be32(addr, clear, set) \
-        qe_iowrite32be((qe_ioread32be(addr) & ~(clear)) | (set), (addr))
+        iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
 #define qe_clrsetbits_be16(addr, clear, set) \
-        qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr))
+        iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
 #define qe_clrsetbits_8(addr, clear, set) \
-        qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr))
+        iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
 
 /* Structure that defines QE firmware binary files.
  *

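After this qe.h hunk, the qe_setbits/clrbits/clrsetbits helpers that remain are thin read-modify-write wrappers over the generic big-endian accessors. A hand-written equivalent of what qe_clrsetbits_be32(addr, clear, set) expands to, for illustration only:

    #include <linux/io.h>
    #include <linux/types.h>

    static inline void example_clrsetbits_be32(void __iomem *addr, u32 clear, u32 set)
    {
            u32 val = ioread32be(addr);

            val &= ~clear;          /* drop the bits to clear */
            val |= set;             /* raise the bits to set  */
            iowrite32be(val, addr);
    }
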