mirror of https://gitee.com/openkylin/linux.git
NXP/FSL SoC driver updates for v5.4
DPAA2 DPIO/MC driver
 - Remove explicit device_link_remove() and device_link_del() calls due
   to framework change
DPAA QBman driver
 - Various changes to make it work with kexec
 - Remove dev_err() usage after platform_get_irq()
GUTS driver
 - Add LS1028 SoC support

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEhb3UXAyxp6UQ0v6khtxQDvusFVQFAl1XCAYACgkQhtxQDvus
FVThUQ//bf+El82sfxyLDUeGo8OZ/tN82AOsoKpVSw8pFNIXJdsPOLavGaJF2ILv
klWC6/gjFiusiXWuQkxPU3XD/tvgxnxwbwR0HojQIZDD8xPiY3LhK5gLLahnyu4N
4dYrXguSaJB2t+I8Pn5S+1QtHvdLiHptbwWfg0DBd4WstPPmO2W2TMFSQMgfTzRv
Y1vy9cJ/ybvnUu0ZpSAmUyUMY9cWLoKKgXONmIYBZg3Ar2bkBhYnFRfDXavuHpJQ
LJMkRXkPsiq9cbuI/DiMcDOGwwYGG+7norrGYohTl9sSy+gfvSgszYgI7b0w5Sx/
/NXmigojmRXOCICGCFXX+PVC2gWy9WAJhgSRXXEs+qHy03qJkSh8YbqP1lLyOZ/+
r+hj7uHvyqxn2t5YThEYxpOURt7ztQdGvrzhiQEplmNYF1SOwKk+nypg9M2DYYG0
OIZzjrUe3nMpBHbnGJJDcjQievI1xsqdyla9fby6w9wmfV9U3Xn9LVmCWNyOpl6k
lcp7ayQHNxChsqFujyyyEhg4E7IyfvRCSCY+QT6aWb7ZIOuOAK0qsSZ76JxhqjDQ
HXBcDTXsHZmkG5Cq3zcWUktehOgXsyuY4SXJUVIzMzQP07sMFxj08/zz3yKutl4O
JLsXlXoJBl0peVJ6pXIiHbFr1jVvACjs2D1mnSuBP/OO+++HDM0=
=KjF7
-----END PGP SIGNATURE-----

Merge tag 'soc-fsl-next-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux into arm/drivers

NXP/FSL SoC driver updates for v5.4

DPAA2 DPIO/MC driver
 - Remove explicit device_link_remove() and device_link_del() calls due
   to framework change
DPAA QBman driver
 - Various changes to make it work with kexec
 - Remove dev_err() usage after platform_get_irq()
GUTS driver
 - Add LS1028 SoC support

* tag 'soc-fsl-next-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/leo/linux:
  bus: fsl-mc: remove explicit device_link_del
  soc: fsl: dpio: remove explicit device_link_remove
  soc: fsl: guts: Add definition for LS1028A
  soc/fsl/qbman: Update device tree with reserved memory
  soc/fsl/qbman: Fixup qman_shutdown_fq()
  soc/fsl/qbman: Disable interrupts during portal recovery
  soc/fsl/qbman: Fix drain_mr_fqni()
  soc/fsl/qbman: Cleanup QMan queues if device was already initialized
  soc/fsl/qbman: Cleanup buffer pools if BMan was initialized prior to bootup
  soc/fsl/qbman: Rework QBMan private memory setup
  soc: fsl: qbman: Remove dev_err() usage after platform_get_irq()

Link: https://lore.kernel.org/r/20190816195301.26660-1-leoyang.li@nxp.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 9ddb2526eb
drivers/bus/fsl-mc/fsl-mc-allocator.c

@@ -330,7 +330,6 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
 
 	fsl_mc_resource_free(resource);
 
-	device_link_del(mc_adev->consumer_link);
 	mc_adev->consumer_link = NULL;
 }
 EXPORT_SYMBOL_GPL(fsl_mc_object_free);
@@ -255,7 +255,6 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
 	fsl_destroy_mc_io(mc_io);
 	fsl_mc_resource_free(resource);
 
-	device_link_del(dpmcp_dev->consumer_link);
 	dpmcp_dev->consumer_link = NULL;
 }
 EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
drivers/soc/fsl/dpio/dpio-service.c

@@ -305,8 +305,6 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
 	list_del(&ctx->node);
 	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
 
-	if (dev)
-		device_link_remove(dev, d->dev);
 }
 EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
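A note on the device_link removals above, with a minimal sketch (the helper name here is hypothetical, not from this series): both drivers create their links with DL_FLAG_AUTOREMOVE_CONSUMER, and the driver core now owns such stateful links and drops them itself when the consumer unbinds, which is the framework change the changelog refers to.

#include <linux/device.h>

/* Hypothetical helper illustrating the pattern */
static struct device_link *sketch_link_to_supplier(struct device *consumer,
						   struct device *supplier)
{
	/*
	 * With DL_FLAG_AUTOREMOVE_CONSUMER the driver core deletes the
	 * link when the consumer unbinds, so no matching
	 * device_link_del()/device_link_remove() call is needed.
	 */
	return device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER);
}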
drivers/soc/fsl/guts.c

@@ -102,6 +102,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
 	  .svr		= 0x87360000,
 	  .mask		= 0xff3f0000,
 	},
+	/* Die: LS1028A, SoC: LS1028A */
+	{ .die		= "LS1028A",
+	  .svr		= 0x870b0000,
+	  .mask		= 0xff3f0000,
+	},
 	{ },
 };
 
@@ -224,6 +229,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
 	{ .compatible = "fsl,ls1012a-dcfg", },
 	{ .compatible = "fsl,ls1046a-dcfg", },
 	{ .compatible = "fsl,lx2160a-dcfg", },
+	{ .compatible = "fsl,ls1028a-dcfg", },
 	{}
 };
 MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
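For illustration, this is roughly how the GUTS driver consumes the table extended above (a condensed restatement of its matching loop, not code added by this series): the LS1028A entry matches any SVR value where (svr & 0xff3f0000) == 0x870b0000.

static const struct fsl_soc_die_attr *
sketch_die_match(u32 svr, const struct fsl_soc_die_attr *matches)
{
	/* Walk the table until the all-zero terminator entry */
	while (matches->svr) {
		if (matches->svr == (svr & matches->mask))
			return matches;
		matches++;
	}
	return NULL;
}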
drivers/soc/fsl/qbman/bman.c

@@ -635,30 +635,31 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
 	return 0;
 }
 
-static int bm_shutdown_pool(u32 bpid)
+int bm_shutdown_pool(u32 bpid)
 {
+	int err = 0;
 	struct bm_mc_command *bm_cmd;
 	union bm_mc_result *bm_res;
 
+	struct bman_portal *p = get_affine_portal();
 	while (1) {
-		struct bman_portal *p = get_affine_portal();
 		/* Acquire buffers until empty */
 		bm_cmd = bm_mc_start(&p->p);
 		bm_cmd->bpid = bpid;
 		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
 		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
-			put_affine_portal();
 			pr_crit("BMan Acquire Command timedout\n");
-			return -ETIMEDOUT;
+			err = -ETIMEDOUT;
+			goto done;
 		}
 		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
-			put_affine_portal();
 			/* Pool is empty */
-			return 0;
+			goto done;
 		}
-		put_affine_portal();
 	}
+done:
+	put_affine_portal();
 	return 0;
 }
drivers/soc/fsl/qbman/bman_ccsr.c

@@ -97,17 +97,40 @@ static void bm_get_version(u16 *id, u8 *major, u8 *minor)
 /* signal transactions for FBPRs with higher priority */
 #define FBPR_AR_RPRIO_HI BIT(30)
 
-static void bm_set_memory(u64 ba, u32 size)
+/* Track if probe has occurred and if cleanup is required */
+static int __bman_probed;
+static int __bman_requires_cleanup;
+
+
+static int bm_set_memory(u64 ba, u32 size)
 {
+	u32 bar, bare;
 	u32 exp = ilog2(size);
 	/* choke if size isn't within range */
 	DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
 		    is_power_of_2(size));
 	/* choke if '[e]ba' has lower-alignment than 'size' */
 	DPAA_ASSERT(!(ba & (size - 1)));
+
+	/* Check to see if BMan has already been initialized */
+	bar = bm_ccsr_in(REG_FBPR_BAR);
+	if (bar) {
+		/* Maker sure ba == what was programmed) */
+		bare = bm_ccsr_in(REG_FBPR_BARE);
+		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+			pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+			       ba, bare, bar);
+			return -ENOMEM;
+		}
+		pr_info("BMan BAR already configured\n");
+		__bman_requires_cleanup = 1;
+		return 1;
+	}
+
 	bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
 	bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
 	bm_ccsr_out(REG_FBPR_AR, exp - 1);
+	return 0;
 }
 
 /*
@@ -120,7 +143,6 @@ static void bm_set_memory(u64 ba, u32 size)
  */
 static dma_addr_t fbpr_a;
 static size_t fbpr_sz;
-static int __bman_probed;
 
 static int bman_fbpr(struct reserved_mem *rmem)
 {
@@ -173,6 +195,16 @@ int bman_is_probed(void)
 }
 EXPORT_SYMBOL_GPL(bman_is_probed);
 
+int bman_requires_cleanup(void)
+{
+	return __bman_requires_cleanup;
+}
+
+void bman_done_cleanup(void)
+{
+	__bman_requires_cleanup = 0;
+}
+
 static int fsl_bman_probe(struct platform_device *pdev)
 {
 	int ret, err_irq;
drivers/soc/fsl/qbman/bman_portal.c

@@ -100,7 +100,7 @@ static int bman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct bm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	int irq, cpu, err;
+	int irq, cpu, err, i;
 
 	err = bman_is_probed();
 	if (!err)
@@ -135,10 +135,8 @@ static int bman_portal_probe(struct platform_device *pdev)
 	pcfg->cpu = -1;
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
-		dev_err(dev, "Can't get %pOF IRQ'\n", node);
+	if (irq <= 0)
 		goto err_ioremap1;
-	}
 	pcfg->irq = irq;
 
 	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
@@ -178,6 +176,22 @@ static int bman_portal_probe(struct platform_device *pdev)
 	if (!cpu_online(cpu))
 		bman_offline_cpu(cpu);
 
+	if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
+		/*
+		 * BMan wasn't reset prior to boot (Kexec for example)
+		 * Empty all the buffer pools so they are in reset state
+		 */
+		for (i = 0; i < BM_POOL_MAX; i++) {
+			err = bm_shutdown_pool(i);
+			if (err) {
+				dev_err(dev, "Failed to shutdown bpool %d\n",
+					i);
+				goto err_portal_init;
+			}
+		}
+		bman_done_cleanup();
+	}
+
 	return 0;
 
 err_portal_init:
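The dev_err() deletions in this probe function and in the QMan portal probe below rely on platform_get_irq() logging its own device-prefixed error on failure, so callers only need to act on the return value. A minimal sketch of the resulting idiom (generic probe, not code from this series):

#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	/* platform_get_irq() has already printed why it failed */
	if (irq < 0)
		return irq;

	/* ... register handlers using irq ... */
	return 0;
}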
drivers/soc/fsl/qbman/bman_priv.h

@@ -76,3 +76,8 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
 
 const struct bm_portal_config *
 bman_get_bm_portal_config(const struct bman_portal *portal);
+
+int bman_requires_cleanup(void);
+void bman_done_cleanup(void);
+
+int bm_shutdown_pool(u32 bpid);
drivers/soc/fsl/qbman/dpaa_sys.c

@@ -37,42 +37,53 @@
 int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
 			   size_t *size)
 {
-	int ret;
 	struct device_node *mem_node;
-	u64 size64;
+	struct reserved_mem *rmem;
+	struct property *prop;
+	int len, err;
+	__be32 *res_array;
 
-	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
-	if (ret) {
-		dev_err(dev,
-			"of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
-			idx, ret);
-		return -ENODEV;
-	}
-	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
-	if (mem_node) {
-		ret = of_property_read_u64(mem_node, "size", &size64);
-		if (ret) {
-			dev_err(dev, "of_address_to_resource fails 0x%x\n",
-				ret);
-			return -ENODEV;
-		}
-		*size = size64;
-	} else {
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+	if (!mem_node) {
 		dev_err(dev, "No memory-region found for index %d\n", idx);
 		return -ENODEV;
 	}
 
-	if (!dma_alloc_coherent(dev, *size, addr, 0)) {
-		dev_err(dev, "DMA Alloc memory failed\n");
+	rmem = of_reserved_mem_lookup(mem_node);
+	if (!rmem) {
+		dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
 		return -ENODEV;
 	}
+	*addr = rmem->base;
+	*size = rmem->size;
 
 	/*
-	 * Disassociate the reserved memory area from the device
-	 * because a device can only have one DMA memory area. This
-	 * should be fine since the memory is allocated and initialized
-	 * and only ever accessed by the QBMan device from now on
+	 * Check if the reg property exists - if not insert the node
+	 * so upon kexec() the same memory region address will be preserved.
+	 * This is needed because QBMan HW does not allow the base address/
+	 * size to be modified once set.
 	 */
-	of_reserved_mem_device_release(dev);
+	prop = of_find_property(mem_node, "reg", &len);
+	if (!prop) {
+		prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
+		if (!prop)
+			return -ENOMEM;
+		prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4,
+						       GFP_KERNEL);
+		if (!prop->value)
+			return -ENOMEM;
+		res_array[0] = cpu_to_be32(upper_32_bits(*addr));
+		res_array[1] = cpu_to_be32(lower_32_bits(*addr));
+		res_array[2] = cpu_to_be32(upper_32_bits(*size));
+		res_array[3] = cpu_to_be32(lower_32_bits(*size));
+		prop->length = sizeof(__be32) * 4;
+		prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL);
+		if (!prop->name)
+			return -ENOMEM;
+		err = of_add_property(mem_node, prop);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
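The four __be32 cells inserted above assume the enclosing bus uses two address cells and two size cells, i.e. the synthesized reg property reads <base-hi base-lo size-hi size-lo>; a sketch of the inverse decoding, for illustration only:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: recover base and size from the 2+2 cell encoding above */
static void sketch_decode_reg(const __be32 cells[4], u64 *base, u64 *size)
{
	*base = ((u64)be32_to_cpu(cells[0]) << 32) | be32_to_cpu(cells[1]);
	*size = ((u64)be32_to_cpu(cells[2]) << 32) | be32_to_cpu(cells[3]);
}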
drivers/soc/fsl/qbman/qman.c

@@ -1018,6 +1018,20 @@ static inline void put_affine_portal(void)
 	put_cpu_var(qman_affine_portal);
 }
 
+
+static inline struct qman_portal *get_portal_for_channel(u16 channel)
+{
+	int i;
+
+	for (i = 0; i < num_possible_cpus(); i++) {
+		if (affine_portals[i] &&
+		    affine_portals[i]->config->channel == channel)
+			return affine_portals[i];
+	}
+
+	return NULL;
+}
+
 static struct workqueue_struct *qm_portal_wq;
 
 int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
@@ -1070,6 +1084,20 @@ int qman_wq_alloc(void)
 	return 0;
 }
 
+
+void qman_enable_irqs(void)
+{
+	int i;
+
+	for (i = 0; i < num_possible_cpus(); i++) {
+		if (affine_portals[i]) {
+			qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
+			qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
+		}
+
+	}
+}
+
 /*
  * This is what everything can wait on, even if it migrates to a different cpu
  * to the one whose affine portal it is waiting on.
@@ -1164,6 +1192,7 @@ static int drain_mr_fqrni(struct qm_portal *p)
 {
 	const union qm_mr_entry *msg;
 loop:
+	qm_mr_pvb_update(p);
 	msg = qm_mr_current(p);
 	if (!msg) {
 		/*
@@ -1180,7 +1209,8 @@ static int drain_mr_fqrni(struct qm_portal *p)
 		 * entries well before the ring has been fully consumed, so
 		 * we're being *really* paranoid here.
 		 */
-		msleep(1);
+		mdelay(1);
+		qm_mr_pvb_update(p);
 		msg = qm_mr_current(p);
 		if (!msg)
 			return 0;
@@ -1267,8 +1297,8 @@ static int qman_create_portal(struct qman_portal *portal,
 	qm_out(p, QM_REG_ISDR, isdr);
 	portal->irq_sources = 0;
 	qm_out(p, QM_REG_IER, 0);
-	qm_out(p, QM_REG_ISR, 0xffffffff);
 	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+	qm_out(p, QM_REG_IIR, 1);
 	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
 		dev_err(c->dev, "request_irq() failed\n");
 		goto fail_irq;
@@ -1288,7 +1318,7 @@ static int qman_create_portal(struct qman_portal *portal,
 	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
 	qm_out(p, QM_REG_ISDR, isdr);
 	if (qm_dqrr_current(p)) {
-		dev_err(c->dev, "DQRR unclean\n");
+		dev_dbg(c->dev, "DQRR unclean\n");
 		qm_dqrr_cdc_consume_n(p, 0xffff);
 	}
 	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
@@ -1301,8 +1331,10 @@ static int qman_create_portal(struct qman_portal *portal,
 	}
 	/* Success */
 	portal->config = c;
+	qm_out(p, QM_REG_ISR, 0xffffffff);
 	qm_out(p, QM_REG_ISDR, 0);
-	qm_out(p, QM_REG_IIR, 0);
+	if (!qman_requires_cleanup())
+		qm_out(p, QM_REG_IIR, 0);
 	/* Write a sane SDQCR */
 	qm_dqrr_sdqcr_set(p, portal->sdqcr);
 	return 0;
@@ -2581,9 +2613,9 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
 #define qm_dqrr_drain_nomatch(p) \
 	_qm_dqrr_consume_and_match(p, 0, 0, false)
 
-static int qman_shutdown_fq(u32 fqid)
+int qman_shutdown_fq(u32 fqid)
 {
-	struct qman_portal *p;
+	struct qman_portal *p, *channel_portal;
 	struct device *dev;
 	union qm_mc_command *mcc;
 	union qm_mc_result *mcr;
@@ -2623,17 +2655,28 @@ static int qman_shutdown_fq(u32 fqid)
 	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
 	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
 
+	if (channel < qm_channel_pool1) {
+		channel_portal = get_portal_for_channel(channel);
+		if (channel_portal == NULL) {
+			dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
+				channel);
+			ret = -EIO;
+			goto out;
+		}
+	} else
+		channel_portal = p;
+
 	switch (state) {
 	case QM_MCR_NP_STATE_TEN_SCHED:
 	case QM_MCR_NP_STATE_TRU_SCHED:
 	case QM_MCR_NP_STATE_ACTIVE:
 	case QM_MCR_NP_STATE_PARKED:
 		orl_empty = 0;
-		mcc = qm_mc_start(&p->p);
+		mcc = qm_mc_start(&channel_portal->p);
 		qm_fqid_set(&mcc->fq, fqid);
-		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
-		if (!qm_mc_result_timeout(&p->p, &mcr)) {
-			dev_err(dev, "QUERYFQ_NP timeout\n");
+		qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
+		if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
+			dev_err(dev, "ALTER_RETIRE timeout\n");
 			ret = -ETIMEDOUT;
 			goto out;
 		}
@@ -2641,6 +2684,9 @@ static int qman_shutdown_fq(u32 fqid)
 			    QM_MCR_VERB_ALTER_RETIRE);
 		res = mcr->result; /* Make a copy as we reuse MCR below */
 
+		if (res == QM_MCR_RESULT_OK)
+			drain_mr_fqrni(&channel_portal->p);
+
 		if (res == QM_MCR_RESULT_PENDING) {
 			/*
 			 * Need to wait for the FQRN in the message ring, which
@@ -2670,21 +2716,25 @@ static int qman_shutdown_fq(u32 fqid)
 			}
 			/* Set the sdqcr to drain this channel */
 			if (channel < qm_channel_pool1)
-				qm_dqrr_sdqcr_set(&p->p,
+				qm_dqrr_sdqcr_set(&channel_portal->p,
 						  QM_SDQCR_TYPE_ACTIVE |
 						  QM_SDQCR_CHANNELS_DEDICATED);
 			else
-				qm_dqrr_sdqcr_set(&p->p,
+				qm_dqrr_sdqcr_set(&channel_portal->p,
 						  QM_SDQCR_TYPE_ACTIVE |
 						  QM_SDQCR_CHANNELS_POOL_CONV
 						  (channel));
 			do {
 				/* Keep draining DQRR while checking the MR*/
-				qm_dqrr_drain_nomatch(&p->p);
+				qm_dqrr_drain_nomatch(&channel_portal->p);
 				/* Process message ring too */
-				found_fqrn = qm_mr_drain(&p->p, FQRN);
+				found_fqrn = qm_mr_drain(&channel_portal->p,
+							 FQRN);
 				cpu_relax();
 			} while (!found_fqrn);
+			/* Restore SDQCR */
+			qm_dqrr_sdqcr_set(&channel_portal->p,
+					  channel_portal->sdqcr);
 
 		}
 		if (res != QM_MCR_RESULT_OK &&
@@ -2715,9 +2765,8 @@ static int qman_shutdown_fq(u32 fqid)
 				 * Wait for a dequeue and process the dequeues,
 				 * making sure to empty the ring completely
 				 */
-			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+			} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
 		}
-		qm_dqrr_sdqcr_set(&p->p, 0);
 
 		while (!orl_empty) {
 			/* Wait for the ORL to have been completely drained */
@@ -2754,7 +2803,7 @@ static int qman_shutdown_fq(u32 fqid)
 
 		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
 			    QM_MCR_VERB_ALTER_OOS);
-		if (mcr->result) {
+		if (mcr->result != QM_MCR_RESULT_OK) {
 			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
 				fqid, mcr->result);
 			ret = -EIO;
drivers/soc/fsl/qbman/qman_ccsr.c

@@ -274,6 +274,7 @@ static u32 __iomem *qm_ccsr_start;
 /* A SDQCR mask comprising all the available/visible pool channels */
 static u32 qm_pools_sdqcr;
 static int __qman_probed;
+static int __qman_requires_cleanup;
 
 static inline u32 qm_ccsr_in(u32 offset)
 {
@@ -340,19 +341,55 @@ static void qm_get_version(u16 *id, u8 *major, u8 *minor)
 }
 
 #define PFDR_AR_EN		BIT(31)
-static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
 {
+	void *ptr;
 	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
 	u32 exp = ilog2(size);
+	u32 bar, bare;
+
 	/* choke if size isn't within range */
 	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
 		    is_power_of_2(size));
 	/* choke if 'ba' has lower-alignment than 'size' */
 	DPAA_ASSERT(!(ba & (size - 1)));
 
+	/* Check to see if QMan has already been initialized */
+	bar = qm_ccsr_in(offset + REG_offset_BAR);
+	if (bar) {
+		/* Maker sure ba == what was programmed) */
+		bare = qm_ccsr_in(offset);
+		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+			pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+			       ba, bare, bar);
+			return -ENOMEM;
+		}
+		__qman_requires_cleanup = 1;
+		/* Return 1 to indicate memory was previously programmed */
+		return 1;
+	}
+	/* Need to temporarily map the area to make sure it is zeroed */
+	ptr = memremap(ba, size, MEMREMAP_WB);
+	if (!ptr) {
+		pr_crit("memremap() of QMan private memory failed\n");
+		return -ENOMEM;
+	}
+	memset(ptr, 0, size);
+
+#ifdef CONFIG_PPC
+	/*
+	 * PPC doesn't appear to flush the cache on memunmap() but the
+	 * cache must be flushed since QMan does non coherent accesses
+	 * to this memory
+	 */
+	flush_dcache_range((unsigned long) ptr, (unsigned long) ptr+size);
+#endif
+	memunmap(ptr);
+
 	qm_ccsr_out(offset, upper_32_bits(ba));
 	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
 	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+	return 0;
 }
 
 static void qm_set_pfdr_threshold(u32 th, u8 k)
@@ -455,7 +492,7 @@ RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
 
 #endif
 
-static unsigned int qm_get_fqid_maxcnt(void)
+unsigned int qm_get_fqid_maxcnt(void)
 {
 	return fqd_sz / 64;
 }
@@ -571,12 +608,19 @@ static int qman_init_ccsr(struct device *dev)
 	int i, err;
 
 	/* FQD memory */
-	qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
-	/* PFDR memory */
-	qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
-	err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
-	if (err)
+	err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+	if (err < 0)
 		return err;
+	/* PFDR memory */
+	err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+	if (err < 0)
+		return err;
+	/* Only initialize PFDRs if the QMan was not initialized before */
+	if (err == 0) {
+		err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+		if (err)
+			return err;
+	}
 	/* thresholds */
 	qm_set_pfdr_threshold(512, 64);
 	qm_set_sfdr_threshold(128);
@@ -693,6 +737,18 @@ int qman_is_probed(void)
 }
 EXPORT_SYMBOL_GPL(qman_is_probed);
 
+int qman_requires_cleanup(void)
+{
+	return __qman_requires_cleanup;
+}
+
+void qman_done_cleanup(void)
+{
+	qman_enable_irqs();
+	__qman_requires_cleanup = 0;
+}
+
+
 static int fsl_qman_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
drivers/soc/fsl/qbman/qman_portal.c

@@ -233,7 +233,7 @@ static int qman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct qm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	int irq, cpu, err;
+	int irq, cpu, err, i;
 	u32 val;
 
 	err = qman_is_probed();
@@ -275,10 +275,8 @@ static int qman_portal_probe(struct platform_device *pdev)
 	pcfg->channel = val;
 	pcfg->cpu = -1;
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
-		dev_err(dev, "Can't get %pOF IRQ\n", node);
+	if (irq <= 0)
 		goto err_ioremap1;
-	}
 	pcfg->irq = irq;
 
 	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
@@ -325,6 +323,22 @@ static int qman_portal_probe(struct platform_device *pdev)
 	if (!cpu_online(cpu))
 		qman_offline_cpu(cpu);
 
+	if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
+		/*
+		 * QMan wasn't reset prior to boot (Kexec for example)
+		 * Empty all the frame queues so they are in reset state
+		 */
+		for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
+			err = qman_shutdown_fq(i);
+			if (err) {
+				dev_err(dev, "Failed to shutdown frame queue %d\n",
+					i);
+				goto err_portal_init;
+			}
+		}
+		qman_done_cleanup();
+	}
+
 	return 0;
 
 err_portal_init:
drivers/soc/fsl/qbman/qman_priv.h

@@ -272,3 +272,11 @@ extern struct qman_portal *affine_portals[NR_CPUS];
 extern struct qman_portal *qman_dma_portal;
 const struct qm_portal_config *qman_get_qm_portal_config(
 						struct qman_portal *portal);
+
+unsigned int qm_get_fqid_maxcnt(void);
+
+int qman_shutdown_fq(u32 fqid);
+
+int qman_requires_cleanup(void);
+void qman_done_cleanup(void);
+void qman_enable_irqs(void);
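Taken together, the QBMan kexec handling above works as follows: the CCSR setup detects an already-programmed BAR and flags that cleanup is required, the last portal to probe drains every frame queue (and, on the BMan side, every buffer pool) back to reset state, and qman_done_cleanup() then re-enables portal interrupts. A condensed, illustrative restatement of the QMan side, using only the helpers declared in qman_priv.h above (the real logic lives in qman_portal_probe()):

#include <linux/device.h>

static int sketch_qman_kexec_recovery(struct device *dev)
{
	u32 fqid;
	int err;

	if (!qman_requires_cleanup())
		return 0;	/* BARs were freshly programmed */

	for (fqid = 0; fqid < qm_get_fqid_maxcnt(); fqid++) {
		err = qman_shutdown_fq(fqid);
		if (err) {
			dev_err(dev, "Failed to shutdown FQ %u: %d\n",
				fqid, err);
			return err;
		}
	}

	qman_done_cleanup();	/* also re-enables portal interrupts */
	return 0;
}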