mirror of https://gitee.com/openkylin/linux.git
remoteproc updates for v4.20
This contains a series of patches that reworks the memory carveout handling in
remoteproc, in order to allow it to be reused for statically allocated memory
regions, e.g. for firmware. It adds support for the audio DSP (both TZ-assisted
and non-TZ-assisted) and the compute DSP on Qualcomm SDM845, and for the
TZ-assisted audio DSP, compute DSP and WiFi processor on Qualcomm QCS404, and
cleans up the naming situation by renaming several drivers. Finally, support
for custom coredump segment handlers is added and is used in the Qualcomm modem
remoteproc driver to gather memory dumps of the firmware.
-----BEGIN PGP SIGNATURE-----

iQJPBAABCAA5FiEEBd4DzF816k8JZtUlCx85Pw2ZrcUFAlvXlhMbHGJqb3JuLmFu
ZGVyc3NvbkBsaW5hcm8ub3JnAAoJEAsfOT8Nma3FvvUP/j5M7fvB1kJnd6NUJ1Hz
CKTw05eYeC4Z9d6nBIso6jcYbf6QfBwtBmdEPfa8Yzhc+Mkr91dR85ylzjd1+PXm
qcCEOrofMMHQ2CSPO0dfdii4zvQRdo2CvMShJqdc/PUo0G4QPQR8Pdb6fLW5S4uL
zb0K0CqS8KBzY382r6QyV5riv+8nsuTKLWTzPHL/mmXD0aW5YCVgxG56DScFJVu3
HD9YzbsECzIBuB0k1UufdYxb2FkRy2pOWttiKDQAlB2q7AxOwEV/oqWQKpmHVb2k
F94zRWlttuMabjtRajDDSqfG8DUse7zpCgakk+oehEZq1WmcVfLKs+asqELBLNc7
J/3N1g2+cE+VQUqBiew6WUR6zgMFMfyqucnp8t0KRgZvpQPxfpiRqynoFxEj8MXl
z4lptuk7DMhYphrwWhA+D/Jqc1prMmX7smCDou3b43DFVYZ06c2gI7aO9VPVhT4m
SUFhzznTn6FUfnFWzDAuDKt51WaRIFemgvvprsNHrl24rtQhYdel+12U9ySiFq8q
Z7Bqb3jNcZ6qHjLy4+6jtnQSZoyf6bbcvF2++85m16i4vElu1X1TQ6HXMmrXpBZ9
z454/jVacZ2vtiAzustBlu86QQZtQhVHenn7j34AWkqIBCF3PiHuZqVfaw0R/pZq
3/HkgfsgudLRu4+qIclHDX+X
=mM8w
-----END PGP SIGNATURE-----

Merge tag 'rproc-v4.20' of git://github.com/andersson/remoteproc

Pull remoteproc updates from Bjorn Andersson:
 "This contains a series of patches that reworks the memory carveout
  handling in remoteproc, in order to allow it to be reused for
  statically allocated memory regions, e.g. for firmware.

  It adds support for the audio DSP (both TZ-assisted and
  non-TZ-assisted) and the compute DSP on Qualcomm SDM845, and for the
  TZ-assisted audio DSP, compute DSP and WiFi processor on Qualcomm
  QCS404, and cleans up the naming situation by renaming several
  drivers.

  Finally, support for custom coredump segment handlers is added and is
  used in the Qualcomm modem remoteproc driver to gather memory dumps
  of the firmware"

* tag 'rproc-v4.20' of git://github.com/andersson/remoteproc: (36 commits)
  remoteproc: qcom: q6v5-mss: Register segments/dumpfn for coredump
  remoteproc: qcom: q6v5-mss: Add custom dump function for modem
  remoteproc: qcom: q6v5-mss: Refactor mba load/unload sequence
  remoteproc: Add mechanism for custom dump function assignment
  remoteproc: Introduce custom dump function for each remoteproc segment
  remoteproc: modify vring allocation to rely on centralized carveout allocator
  remoteproc: qcom: q6v5: shore up resource probe handling
  remoteproc: qcom: qcom_q6v5_adsp: Fix some return value check
  remoteproc: modify rproc_handle_carveout to support pre-registered region
  remoteproc: add helper function to check carveout device address
  remoteproc: add helper function to allocate rproc_mem_entry from reserved memory
  remoteproc: add alloc ops in rproc_mem_entry struct
  remoteproc: introduce rproc_find_carveout_by_name function
  remoteproc: introduce rproc_add_carveout function
  remoteproc: add helper function to allocate and init rproc_mem_entry struct
  remoteproc: add name in rproc_mem_entry struct
  remoteproc: add release ops in rproc_mem_entry struct
  remoteproc: add rproc_va_to_pa function
  remoteproc: configure IOMMU only if device address requested
  remoteproc: qcom: q6v5-mss: add SCM probe dependency
  ...
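Editor's note: to make the "custom coredump segment handlers" mentioned above
concrete, here is a minimal, hedged sketch of how a remoteproc driver might
register one. The rproc_coredump_add_custom_segment(), rproc_da_to_va() and
dump-callback signatures follow the qcom_q6v5_mss changes further down in this
diff; the my_* names, device address and size are purely illustrative.

#include <linux/remoteproc.h>
#include <linux/sizes.h>
#include <linux/string.h>

/* Dump callback: copy the segment contents into the coredump buffer */
static void my_dump_segment(struct rproc *rproc,
			    struct rproc_dump_segment *segment,
			    void *dest)
{
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Fill with a marker if the region cannot be mapped right now */
	if (!ptr)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);
}

/* Register one custom segment, e.g. from the driver's parse_fw path */
static int my_register_dump_segment(struct rproc *rproc)
{
	/* Device address and length of the region to capture (illustrative) */
	return rproc_coredump_add_custom_segment(rproc, 0x08400000, SZ_1M,
						 my_dump_segment, NULL);
}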
This commit is contained in:
commit 929e134c43

@@ -0,0 +1,126 @@
Qualcomm Technology Inc. ADSP Peripheral Image Loader

This document defines the binding for a component that loads and boots firmware
on the Qualcomm Technology Inc. ADSP Hexagon core.

- compatible:
        Usage: required
        Value type: <string>
        Definition: must be one of:
                    "qcom,sdm845-adsp-pil"

- reg:
        Usage: required
        Value type: <prop-encoded-array>
        Definition: must specify the base address and size of the qdsp6ss register

- interrupts-extended:
        Usage: required
        Value type: <prop-encoded-array>
        Definition: must list the watchdog, fatal, ready, handover and
                    stop-ack IRQs

- interrupt-names:
        Usage: required
        Value type: <stringlist>
        Definition: must be "wdog", "fatal", "ready", "handover", "stop-ack"

- clocks:
        Usage: required
        Value type: <prop-encoded-array>
        Definition: list of 8 phandle and clock specifier pairs for the adsp

- clock-names:
        Usage: required
        Value type: <stringlist>
        Definition: list of clock input name strings sorted in the same
                    order as the clocks property. Must include "xo",
                    "sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr",
                    "lpass_ahbm_aon_cbcr", "qdsp6ss_xo", "qdsp6ss_sleep"
                    and "qdsp6ss_core".

- power-domains:
        Usage: required
        Value type: <phandle>
        Definition: reference to the cx power domain node

- resets:
        Usage: required
        Value type: <phandle>
        Definition: reference to the list of 2 reset controllers for the adsp

- reset-names:
        Usage: required
        Value type: <stringlist>
        Definition: must be "pdc_sync" and "cc_lpass"

- qcom,halt-regs:
        Usage: required
        Value type: <prop-encoded-array>
        Definition: a phandle reference to a syscon representing TCSR followed
                    by the offset within the syscon for the LPASS halt register

- memory-region:
        Usage: required
        Value type: <phandle>
        Definition: reference to the reserved-memory for the ADSP

- qcom,smem-states:
        Usage: required
        Value type: <phandle>
        Definition: reference to the smem state for requesting the ADSP to
                    shut down

- qcom,smem-state-names:
        Usage: required
        Value type: <stringlist>
        Definition: must be "stop"


= SUBNODES
The adsp node may have a subnode named "glink-edge" that describes the
communication edge, channels and devices related to the ADSP.
See ../soc/qcom/qcom,glink.txt for details on how to describe these.

= EXAMPLE
The following example describes the resources needed to boot and control the
ADSP, as it is found on SDM845 boards.

        remoteproc@17300000 {
                compatible = "qcom,sdm845-adsp-pil";
                reg = <0x17300000 0x40c>;

                interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
                                      <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
                                      <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
                                      <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
                                      <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
                interrupt-names = "wdog", "fatal", "ready",
                                  "handover", "stop-ack";

                clocks = <&rpmhcc RPMH_CXO_CLK>,
                         <&gcc GCC_LPASS_SWAY_CLK>,
                         <&lpasscc LPASS_AUDIO_WRAPPER_AON_CLK>,
                         <&lpasscc LPASS_Q6SS_AHBS_AON_CLK>,
                         <&lpasscc LPASS_Q6SS_AHBM_AON_CLK>,
                         <&lpasscc LPASS_QDSP6SS_XO_CLK>,
                         <&lpasscc LPASS_QDSP6SS_SLEEP_CLK>,
                         <&lpasscc LPASS_QDSP6SS_CORE_CLK>;
                clock-names = "xo", "sway_cbcr", "lpass_aon",
                              "lpass_ahbs_aon_cbcr",
                              "lpass_ahbm_aon_cbcr", "qdsp6ss_xo",
                              "qdsp6ss_sleep", "qdsp6ss_core";

                power-domains = <&rpmhpd SDM845_CX>;

                resets = <&pdc_reset PDC_AUDIO_SYNC_RESET>,
                         <&aoss_reset AOSS_CC_LPASS_RESTART>;
                reset-names = "pdc_sync", "cc_lpass";

                qcom,halt-regs = <&tcsr_mutex_regs 0x22000>;

                memory-region = <&pil_adsp_mem>;

                qcom,smem-states = <&adsp_smp2p_out 0>;
                qcom,smem-state-names = "stop";
        };
@@ -10,6 +10,11 @@ on the Qualcomm ADSP Hexagon core.
                    "qcom,msm8974-adsp-pil"
                    "qcom,msm8996-adsp-pil"
                    "qcom,msm8996-slpi-pil"
                    "qcom,qcs404-adsp-pas"
                    "qcom,qcs404-cdsp-pas"
                    "qcom,qcs404-wcss-pas"
                    "qcom,sdm845-adsp-pas"
                    "qcom,sdm845-cdsp-pas"

- interrupts-extended:
        Usage: required
@@ -53,13 +53,17 @@ on the Qualcomm Hexagon core.
        Definition: reference to the reset-controller for the modem sub-system
                    reference to the list of 3 reset-controllers for the
                    wcss sub-system
                    reference to the list of 2 reset-controllers for the modem
                    sub-system on SDM845 SoCs

- reset-names:
        Usage: required
        Value type: <stringlist>
        Definition: must be "mss_restart" for the modem sub-system
        Definition: must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
                    for the wcss syb-system
                    must be "wcss_aon_reset", "wcss_reset", "wcss_q6_reset"
                    for the wcss sub-system
                    must be "mss_restart", "pdc_reset" for the modem
                    sub-system on SDM845 SoCs

- cx-supply:
- mss-supply:
|
|||
It's safe to say N here if you're not interested in the Keystone
|
||||
DSPs or just want to use a bare minimum kernel.
|
||||
|
||||
config QCOM_ADSP_PIL
|
||||
tristate "Qualcomm ADSP Peripheral Image Loader"
|
||||
config QCOM_RPROC_COMMON
|
||||
tristate
|
||||
|
||||
config QCOM_Q6V5_COMMON
|
||||
tristate
|
||||
depends on ARCH_QCOM
|
||||
depends on QCOM_SMEM
|
||||
|
||||
config QCOM_Q6V5_ADSP
|
||||
tristate "Qualcomm Technology Inc ADSP Peripheral Image Loader"
|
||||
depends on OF && ARCH_QCOM
|
||||
depends on QCOM_SMEM
|
||||
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
|
||||
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
|
||||
depends on QCOM_SYSMON || QCOM_SYSMON=n
|
||||
select MFD_SYSCON
|
||||
select QCOM_MDT_LOADER
|
||||
select QCOM_Q6V5_COMMON
|
||||
select QCOM_RPROC_COMMON
|
||||
help
|
||||
Say y here to support the Peripheral Image Loader
|
||||
for the Qualcomm Technology Inc. ADSP remote processors.
|
||||
|
||||
config QCOM_Q6V5_MSS
|
||||
tristate "Qualcomm Hexagon V5 self-authenticating modem subsystem support"
|
||||
depends on OF && ARCH_QCOM
|
||||
depends on QCOM_SMEM
|
||||
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
|
||||
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
|
||||
depends on QCOM_SYSMON || QCOM_SYSMON=n
|
||||
select MFD_SYSCON
|
||||
select QCOM_Q6V5_COMMON
|
||||
select QCOM_RPROC_COMMON
|
||||
select QCOM_SCM
|
||||
help
|
||||
Say y here to support the Qualcomm self-authenticating modem
|
||||
subsystem based on Hexagon V5.
|
||||
|
||||
config QCOM_Q6V5_PAS
|
||||
tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support"
|
||||
depends on OF && ARCH_QCOM
|
||||
depends on QCOM_SMEM
|
||||
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
|
||||
|
@ -98,30 +136,8 @@ config QCOM_ADSP_PIL
|
|||
select QCOM_SCM
|
||||
help
|
||||
Say y here to support the TrustZone based Peripherial Image Loader
|
||||
for the Qualcomm ADSP remote processors.
|
||||
|
||||
config QCOM_RPROC_COMMON
|
||||
tristate
|
||||
|
||||
config QCOM_Q6V5_COMMON
|
||||
tristate
|
||||
depends on ARCH_QCOM
|
||||
depends on QCOM_SMEM
|
||||
|
||||
config QCOM_Q6V5_PIL
|
||||
tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
|
||||
depends on OF && ARCH_QCOM
|
||||
depends on QCOM_SMEM
|
||||
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
|
||||
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
|
||||
depends on QCOM_SYSMON || QCOM_SYSMON=n
|
||||
select MFD_SYSCON
|
||||
select QCOM_Q6V5_COMMON
|
||||
select QCOM_RPROC_COMMON
|
||||
select QCOM_SCM
|
||||
help
|
||||
Say y here to support the Qualcomm Peripherial Image Loader for the
|
||||
Hexagon V5 based remote processors.
|
||||
for the Qualcomm Hexagon v5 based remote processors. This is commonly
|
||||
used to control subsystems such as ADSP, Compute and Sensor.
|
||||
|
||||
config QCOM_Q6V5_WCSS
|
||||
tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
|
||||
|
|
|
@@ -14,10 +14,11 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
obj-$(CONFIG_QCOM_ADSP_PIL) += qcom_adsp_pil.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
@@ -226,7 +226,7 @@ static int da8xx_rproc_get_internal_memories(struct platform_device *pdev,
                        res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK;
                drproc->mem[i].size = resource_size(res);

                dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %p da 0x%x\n",
                dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
                        mem_names[i], &drproc->mem[i].bus_addr,
                        drproc->mem[i].size, drproc->mem[i].cpu_addr,
                        drproc->mem[i].dev_addr);
@ -84,6 +84,7 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
|
|||
else
|
||||
dev_err(q6v5->dev, "fatal error without message\n");
|
||||
|
||||
q6v5->running = false;
|
||||
rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
|
@ -150,8 +151,6 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
|
|||
{
|
||||
int ret;
|
||||
|
||||
q6v5->running = false;
|
||||
|
||||
qcom_smem_state_update_bits(q6v5->state,
|
||||
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
|
||||
|
||||
|
@ -188,6 +187,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
|
|||
init_completion(&q6v5->stop_done);
|
||||
|
||||
q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
|
||||
if (q6v5->wdog_irq < 0) {
|
||||
if (q6v5->wdog_irq != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev,
|
||||
"failed to retrieve wdog IRQ: %d\n",
|
||||
q6v5->wdog_irq);
|
||||
return q6v5->wdog_irq;
|
||||
}
|
||||
|
||||
ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
|
||||
NULL, q6v5_wdog_interrupt,
|
||||
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
|
||||
|
@ -198,6 +205,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
|
|||
}
|
||||
|
||||
q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
|
||||
if (q6v5->fatal_irq < 0) {
|
||||
if (q6v5->fatal_irq != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev,
|
||||
"failed to retrieve fatal IRQ: %d\n",
|
||||
q6v5->fatal_irq);
|
||||
return q6v5->fatal_irq;
|
||||
}
|
||||
|
||||
ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
|
||||
NULL, q6v5_fatal_interrupt,
|
||||
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
|
||||
|
@ -208,6 +223,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
|
|||
}
|
||||
|
||||
q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
|
||||
if (q6v5->ready_irq < 0) {
|
||||
if (q6v5->ready_irq != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev,
|
||||
"failed to retrieve ready IRQ: %d\n",
|
||||
q6v5->ready_irq);
|
||||
return q6v5->ready_irq;
|
||||
}
|
||||
|
||||
ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
|
||||
NULL, q6v5_ready_interrupt,
|
||||
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
|
||||
|
@ -218,6 +241,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
|
|||
}
|
||||
|
||||
q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
|
||||
if (q6v5->handover_irq < 0) {
|
||||
if (q6v5->handover_irq != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev,
|
||||
"failed to retrieve handover IRQ: %d\n",
|
||||
q6v5->handover_irq);
|
||||
return q6v5->handover_irq;
|
||||
}
|
||||
|
||||
ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
|
||||
NULL, q6v5_handover_interrupt,
|
||||
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
|
||||
|
@ -229,6 +260,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
|
|||
disable_irq(q6v5->handover_irq);
|
||||
|
||||
q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
|
||||
if (q6v5->stop_irq < 0) {
|
||||
if (q6v5->stop_irq != -EPROBE_DEFER)
|
||||
dev_err(&pdev->dev,
|
||||
"failed to retrieve stop-ack IRQ: %d\n",
|
||||
q6v5->stop_irq);
|
||||
return q6v5->stop_irq;
|
||||
}
|
||||
|
||||
ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
|
||||
NULL, q6v5_stop_interrupt,
|
||||
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
|
||||
|
|
|
@ -0,0 +1,497 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Qualcomm Technology Inc. ADSP Peripheral Image Loader for SDM845.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_domain.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/remoteproc.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/soc/qcom/mdt_loader.h>
|
||||
#include <linux/soc/qcom/smem.h>
|
||||
#include <linux/soc/qcom/smem_state.h>
|
||||
|
||||
#include "qcom_common.h"
|
||||
#include "qcom_q6v5.h"
|
||||
#include "remoteproc_internal.h"
|
||||
|
||||
/* time out value */
|
||||
#define ACK_TIMEOUT 1000
|
||||
#define BOOT_FSM_TIMEOUT 10000
|
||||
/* mask values */
|
||||
#define EVB_MASK GENMASK(27, 4)
|
||||
/*QDSP6SS register offsets*/
|
||||
#define RST_EVB_REG 0x10
|
||||
#define CORE_START_REG 0x400
|
||||
#define BOOT_CMD_REG 0x404
|
||||
#define BOOT_STATUS_REG 0x408
|
||||
#define RET_CFG_REG 0x1C
|
||||
/*TCSR register offsets*/
|
||||
#define LPASS_MASTER_IDLE_REG 0x8
|
||||
#define LPASS_HALTACK_REG 0x4
|
||||
#define LPASS_PWR_ON_REG 0x10
|
||||
#define LPASS_HALTREQ_REG 0x0
|
||||
|
||||
/* list of clocks required by ADSP PIL */
|
||||
static const char * const adsp_clk_id[] = {
|
||||
"sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
|
||||
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core",
|
||||
};
|
||||
|
||||
struct adsp_pil_data {
|
||||
int crash_reason_smem;
|
||||
const char *firmware_name;
|
||||
|
||||
const char *ssr_name;
|
||||
const char *sysmon_name;
|
||||
int ssctl_id;
|
||||
};
|
||||
|
||||
struct qcom_adsp {
|
||||
struct device *dev;
|
||||
struct rproc *rproc;
|
||||
|
||||
struct qcom_q6v5 q6v5;
|
||||
|
||||
struct clk *xo;
|
||||
|
||||
int num_clks;
|
||||
struct clk_bulk_data *clks;
|
||||
|
||||
void __iomem *qdsp6ss_base;
|
||||
|
||||
struct reset_control *pdc_sync_reset;
|
||||
struct reset_control *cc_lpass_restart;
|
||||
|
||||
struct regmap *halt_map;
|
||||
unsigned int halt_lpass;
|
||||
|
||||
int crash_reason_smem;
|
||||
|
||||
struct completion start_done;
|
||||
struct completion stop_done;
|
||||
|
||||
phys_addr_t mem_phys;
|
||||
phys_addr_t mem_reloc;
|
||||
void *mem_region;
|
||||
size_t mem_size;
|
||||
|
||||
struct qcom_rproc_glink glink_subdev;
|
||||
struct qcom_rproc_ssr ssr_subdev;
|
||||
struct qcom_sysmon *sysmon;
|
||||
};
|
||||
|
||||
static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
|
||||
{
|
||||
unsigned long timeout;
|
||||
unsigned int val;
|
||||
int ret;
|
||||
|
||||
/* Reset the retention logic */
|
||||
val = readl(adsp->qdsp6ss_base + RET_CFG_REG);
|
||||
val |= 0x1;
|
||||
writel(val, adsp->qdsp6ss_base + RET_CFG_REG);
|
||||
|
||||
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
|
||||
|
||||
/* QDSP6 master port needs to be explicitly halted */
|
||||
ret = regmap_read(adsp->halt_map,
|
||||
adsp->halt_lpass + LPASS_PWR_ON_REG, &val);
|
||||
if (ret || !val)
|
||||
goto reset;
|
||||
|
||||
ret = regmap_read(adsp->halt_map,
|
||||
adsp->halt_lpass + LPASS_MASTER_IDLE_REG,
|
||||
&val);
|
||||
if (ret || val)
|
||||
goto reset;
|
||||
|
||||
regmap_write(adsp->halt_map,
|
||||
adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
|
||||
|
||||
/* Wait for halt ACK from QDSP6 */
|
||||
timeout = jiffies + msecs_to_jiffies(ACK_TIMEOUT);
|
||||
for (;;) {
|
||||
ret = regmap_read(adsp->halt_map,
|
||||
adsp->halt_lpass + LPASS_HALTACK_REG, &val);
|
||||
if (ret || val || time_after(jiffies, timeout))
|
||||
break;
|
||||
|
||||
usleep_range(1000, 1100);
|
||||
}
|
||||
|
||||
ret = regmap_read(adsp->halt_map,
|
||||
adsp->halt_lpass + LPASS_MASTER_IDLE_REG, &val);
|
||||
if (ret || !val)
|
||||
dev_err(adsp->dev, "port failed halt\n");
|
||||
|
||||
reset:
|
||||
/* Assert the LPASS PDC Reset */
|
||||
reset_control_assert(adsp->pdc_sync_reset);
|
||||
/* Place the LPASS processor into reset */
|
||||
reset_control_assert(adsp->cc_lpass_restart);
|
||||
/* wait after asserting subsystem restart from AOSS */
|
||||
usleep_range(200, 300);
|
||||
|
||||
/* Clear the halt request for the AXIM and AHBM for Q6 */
|
||||
regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
|
||||
|
||||
/* De-assert the LPASS PDC Reset */
|
||||
reset_control_deassert(adsp->pdc_sync_reset);
|
||||
/* Remove the LPASS reset */
|
||||
reset_control_deassert(adsp->cc_lpass_restart);
|
||||
/* wait after de-asserting subsystem restart from AOSS */
|
||||
usleep_range(200, 300);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
|
||||
{
|
||||
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
|
||||
|
||||
return qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
|
||||
adsp->mem_region, adsp->mem_phys, adsp->mem_size,
|
||||
&adsp->mem_reloc);
|
||||
}
|
||||
|
||||
static int adsp_start(struct rproc *rproc)
|
||||
{
|
||||
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
|
||||
int ret;
|
||||
unsigned int val;
|
||||
|
||||
qcom_q6v5_prepare(&adsp->q6v5);
|
||||
|
||||
ret = clk_prepare_enable(adsp->xo);
|
||||
if (ret)
|
||||
goto disable_irqs;
|
||||
|
||||
dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
|
||||
ret = pm_runtime_get_sync(adsp->dev);
|
||||
if (ret)
|
||||
goto disable_xo_clk;
|
||||
|
||||
ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
|
||||
if (ret) {
|
||||
dev_err(adsp->dev, "adsp clk_enable failed\n");
|
||||
goto disable_power_domain;
|
||||
}
|
||||
|
||||
/* Program boot address */
|
||||
writel(adsp->mem_phys >> 4, adsp->qdsp6ss_base + RST_EVB_REG);
|
||||
|
||||
/* De-assert QDSP6 stop core. QDSP6 will execute after out of reset */
|
||||
writel(0x1, adsp->qdsp6ss_base + CORE_START_REG);
|
||||
|
||||
/* Trigger boot FSM to start QDSP6 */
|
||||
writel(0x1, adsp->qdsp6ss_base + BOOT_CMD_REG);
|
||||
|
||||
/* Wait for core to come out of reset */
|
||||
ret = readl_poll_timeout(adsp->qdsp6ss_base + BOOT_STATUS_REG,
|
||||
val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
|
||||
if (ret) {
|
||||
dev_err(adsp->dev, "failed to bootup adsp\n");
|
||||
goto disable_adsp_clks;
|
||||
}
|
||||
|
||||
ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5 * HZ));
|
||||
if (ret == -ETIMEDOUT) {
|
||||
dev_err(adsp->dev, "start timed out\n");
|
||||
goto disable_adsp_clks;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
disable_adsp_clks:
|
||||
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
|
||||
disable_power_domain:
|
||||
dev_pm_genpd_set_performance_state(adsp->dev, 0);
|
||||
pm_runtime_put(adsp->dev);
|
||||
disable_xo_clk:
|
||||
clk_disable_unprepare(adsp->xo);
|
||||
disable_irqs:
|
||||
qcom_q6v5_unprepare(&adsp->q6v5);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
|
||||
{
|
||||
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
|
||||
|
||||
clk_disable_unprepare(adsp->xo);
|
||||
dev_pm_genpd_set_performance_state(adsp->dev, 0);
|
||||
pm_runtime_put(adsp->dev);
|
||||
}
|
||||
|
||||
static int adsp_stop(struct rproc *rproc)
|
||||
{
|
||||
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
|
||||
int handover;
|
||||
int ret;
|
||||
|
||||
ret = qcom_q6v5_request_stop(&adsp->q6v5);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_err(adsp->dev, "timed out on wait\n");
|
||||
|
||||
ret = qcom_adsp_shutdown(adsp);
|
||||
if (ret)
|
||||
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
|
||||
|
||||
handover = qcom_q6v5_unprepare(&adsp->q6v5);
|
||||
if (handover)
|
||||
qcom_adsp_pil_handover(&adsp->q6v5);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
|
||||
{
|
||||
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
|
||||
int offset;
|
||||
|
||||
offset = da - adsp->mem_reloc;
|
||||
if (offset < 0 || offset + len > adsp->mem_size)
|
||||
return NULL;
|
||||
|
||||
return adsp->mem_region + offset;
|
||||
}
|
||||
|
||||
static const struct rproc_ops adsp_ops = {
|
||||
.start = adsp_start,
|
||||
.stop = adsp_stop,
|
||||
.da_to_va = adsp_da_to_va,
|
||||
.parse_fw = qcom_register_dump_segments,
|
||||
.load = adsp_load,
|
||||
};
|
||||
|
||||
static int adsp_init_clock(struct qcom_adsp *adsp)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
adsp->xo = devm_clk_get(adsp->dev, "xo");
|
||||
if (IS_ERR(adsp->xo)) {
|
||||
ret = PTR_ERR(adsp->xo);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(adsp->dev, "failed to get xo clock");
|
||||
return ret;
|
||||
}
|
||||
|
||||
adsp->num_clks = ARRAY_SIZE(adsp_clk_id);
|
||||
adsp->clks = devm_kcalloc(adsp->dev, adsp->num_clks,
|
||||
sizeof(*adsp->clks), GFP_KERNEL);
|
||||
if (!adsp->clks)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < adsp->num_clks; i++)
|
||||
adsp->clks[i].id = adsp_clk_id[i];
|
||||
|
||||
return devm_clk_bulk_get(adsp->dev, adsp->num_clks, adsp->clks);
|
||||
}
|
||||
|
||||
static int adsp_init_reset(struct qcom_adsp *adsp)
|
||||
{
|
||||
adsp->pdc_sync_reset = devm_reset_control_get_exclusive(adsp->dev,
|
||||
"pdc_sync");
|
||||
if (IS_ERR(adsp->pdc_sync_reset)) {
|
||||
dev_err(adsp->dev, "failed to acquire pdc_sync reset\n");
|
||||
return PTR_ERR(adsp->pdc_sync_reset);
|
||||
}
|
||||
|
||||
adsp->cc_lpass_restart = devm_reset_control_get_exclusive(adsp->dev,
|
||||
"cc_lpass");
|
||||
if (IS_ERR(adsp->cc_lpass_restart)) {
|
||||
dev_err(adsp->dev, "failed to acquire cc_lpass restart\n");
|
||||
return PTR_ERR(adsp->cc_lpass_restart);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adsp_init_mmio(struct qcom_adsp *adsp,
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *syscon;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start,
|
||||
resource_size(res));
|
||||
if (!adsp->qdsp6ss_base) {
|
||||
dev_err(adsp->dev, "failed to map QDSP6SS registers\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0);
|
||||
if (!syscon) {
|
||||
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
adsp->halt_map = syscon_node_to_regmap(syscon);
|
||||
of_node_put(syscon);
|
||||
if (IS_ERR(adsp->halt_map))
|
||||
return PTR_ERR(adsp->halt_map);
|
||||
|
||||
ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,halt-regs",
|
||||
1, &adsp->halt_lpass);
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "no offset in syscon\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
|
||||
{
|
||||
struct device_node *node;
|
||||
struct resource r;
|
||||
int ret;
|
||||
|
||||
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
|
||||
if (!node) {
|
||||
dev_err(adsp->dev, "no memory-region specified\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = of_address_to_resource(node, 0, &r);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
adsp->mem_phys = adsp->mem_reloc = r.start;
|
||||
adsp->mem_size = resource_size(&r);
|
||||
adsp->mem_region = devm_ioremap_wc(adsp->dev,
|
||||
adsp->mem_phys, adsp->mem_size);
|
||||
if (!adsp->mem_region) {
|
||||
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
|
||||
&r.start, adsp->mem_size);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adsp_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct adsp_pil_data *desc;
|
||||
struct qcom_adsp *adsp;
|
||||
struct rproc *rproc;
|
||||
int ret;
|
||||
|
||||
desc = of_device_get_match_data(&pdev->dev);
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
|
||||
rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
|
||||
desc->firmware_name, sizeof(*adsp));
|
||||
if (!rproc) {
|
||||
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
adsp = (struct qcom_adsp *)rproc->priv;
|
||||
adsp->dev = &pdev->dev;
|
||||
adsp->rproc = rproc;
|
||||
platform_set_drvdata(pdev, adsp);
|
||||
|
||||
ret = adsp_alloc_memory_region(adsp);
|
||||
if (ret)
|
||||
goto free_rproc;
|
||||
|
||||
ret = adsp_init_clock(adsp);
|
||||
if (ret)
|
||||
goto free_rproc;
|
||||
|
||||
pm_runtime_enable(adsp->dev);
|
||||
|
||||
ret = adsp_init_reset(adsp);
|
||||
if (ret)
|
||||
goto disable_pm;
|
||||
|
||||
ret = adsp_init_mmio(adsp, pdev);
|
||||
if (ret)
|
||||
goto disable_pm;
|
||||
|
||||
ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
|
||||
qcom_adsp_pil_handover);
|
||||
if (ret)
|
||||
goto disable_pm;
|
||||
|
||||
qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
|
||||
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
|
||||
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
|
||||
desc->sysmon_name,
|
||||
desc->ssctl_id);
|
||||
|
||||
ret = rproc_add(rproc);
|
||||
if (ret)
|
||||
goto disable_pm;
|
||||
|
||||
return 0;
|
||||
|
||||
disable_pm:
|
||||
pm_runtime_disable(adsp->dev);
|
||||
free_rproc:
|
||||
rproc_free(rproc);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int adsp_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct qcom_adsp *adsp = platform_get_drvdata(pdev);
|
||||
|
||||
rproc_del(adsp->rproc);
|
||||
|
||||
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
|
||||
qcom_remove_sysmon_subdev(adsp->sysmon);
|
||||
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
|
||||
pm_runtime_disable(adsp->dev);
|
||||
rproc_free(adsp->rproc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct adsp_pil_data adsp_resource_init = {
|
||||
.crash_reason_smem = 423,
|
||||
.firmware_name = "adsp.mdt",
|
||||
.ssr_name = "lpass",
|
||||
.sysmon_name = "adsp",
|
||||
.ssctl_id = 0x14,
|
||||
};
|
||||
|
||||
static const struct of_device_id adsp_of_match[] = {
|
||||
{ .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, adsp_of_match);
|
||||
|
||||
static struct platform_driver adsp_pil_driver = {
|
||||
.probe = adsp_probe,
|
||||
.remove = adsp_remove,
|
||||
.driver = {
|
||||
.name = "qcom_q6v5_adsp",
|
||||
.of_match_table = adsp_of_match,
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(adsp_pil_driver);
|
||||
MODULE_DESCRIPTION("QTI SDM845 ADSP Peripheral Image Loader");
|
||||
MODULE_LICENSE("GPL v2");
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Qualcomm Peripheral Image Loader
|
||||
* Qualcomm self-authenticating modem subsystem remoteproc driver
|
||||
*
|
||||
* Copyright (C) 2016 Linaro Ltd.
|
||||
* Copyright (C) 2014 Sony Mobile Communications AB
|
||||
|
@ -149,6 +149,7 @@ struct q6v5 {
|
|||
u32 halt_nc;
|
||||
|
||||
struct reset_control *mss_restart;
|
||||
struct reset_control *pdc_reset;
|
||||
|
||||
struct qcom_q6v5 q6v5;
|
||||
|
||||
|
@ -166,6 +167,10 @@ struct q6v5 {
|
|||
|
||||
bool running;
|
||||
|
||||
bool dump_mba_loaded;
|
||||
unsigned long dump_segment_mask;
|
||||
unsigned long dump_complete_mask;
|
||||
|
||||
phys_addr_t mba_phys;
|
||||
void *mba_region;
|
||||
size_t mba_size;
|
||||
|
@ -347,10 +352,17 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
|
|||
|
||||
static int q6v5_reset_assert(struct q6v5 *qproc)
|
||||
{
|
||||
if (qproc->has_alt_reset)
|
||||
return reset_control_reset(qproc->mss_restart);
|
||||
else
|
||||
return reset_control_assert(qproc->mss_restart);
|
||||
int ret;
|
||||
|
||||
if (qproc->has_alt_reset) {
|
||||
reset_control_assert(qproc->pdc_reset);
|
||||
ret = reset_control_reset(qproc->mss_restart);
|
||||
reset_control_deassert(qproc->pdc_reset);
|
||||
} else {
|
||||
ret = reset_control_assert(qproc->mss_restart);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int q6v5_reset_deassert(struct q6v5 *qproc)
|
||||
|
@ -358,9 +370,11 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
|
|||
int ret;
|
||||
|
||||
if (qproc->has_alt_reset) {
|
||||
reset_control_assert(qproc->pdc_reset);
|
||||
writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
|
||||
ret = reset_control_reset(qproc->mss_restart);
|
||||
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
|
||||
reset_control_deassert(qproc->pdc_reset);
|
||||
} else {
|
||||
ret = reset_control_deassert(qproc->mss_restart);
|
||||
}
|
||||
|
@ -669,126 +683,10 @@ static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
|
|||
return true;
|
||||
}
|
||||
|
||||
static int q6v5_mpss_load(struct q6v5 *qproc)
|
||||
static int q6v5_mba_load(struct q6v5 *qproc)
|
||||
{
|
||||
const struct elf32_phdr *phdrs;
|
||||
const struct elf32_phdr *phdr;
|
||||
const struct firmware *seg_fw;
|
||||
const struct firmware *fw;
|
||||
struct elf32_hdr *ehdr;
|
||||
phys_addr_t mpss_reloc;
|
||||
phys_addr_t boot_addr;
|
||||
phys_addr_t min_addr = PHYS_ADDR_MAX;
|
||||
phys_addr_t max_addr = 0;
|
||||
bool relocate = false;
|
||||
char seg_name[10];
|
||||
ssize_t offset;
|
||||
size_t size = 0;
|
||||
void *ptr;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
ret = request_firmware(&fw, "modem.mdt", qproc->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(qproc->dev, "unable to load modem.mdt\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Initialize the RMB validator */
|
||||
writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
|
||||
|
||||
ret = q6v5_mpss_init_image(qproc, fw);
|
||||
if (ret)
|
||||
goto release_firmware;
|
||||
|
||||
ehdr = (struct elf32_hdr *)fw->data;
|
||||
phdrs = (struct elf32_phdr *)(ehdr + 1);
|
||||
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &phdrs[i];
|
||||
|
||||
if (!q6v5_phdr_valid(phdr))
|
||||
continue;
|
||||
|
||||
if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
|
||||
relocate = true;
|
||||
|
||||
if (phdr->p_paddr < min_addr)
|
||||
min_addr = phdr->p_paddr;
|
||||
|
||||
if (phdr->p_paddr + phdr->p_memsz > max_addr)
|
||||
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
|
||||
}
|
||||
|
||||
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
|
||||
/* Load firmware segments */
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &phdrs[i];
|
||||
|
||||
if (!q6v5_phdr_valid(phdr))
|
||||
continue;
|
||||
|
||||
offset = phdr->p_paddr - mpss_reloc;
|
||||
if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
|
||||
dev_err(qproc->dev, "segment outside memory range\n");
|
||||
ret = -EINVAL;
|
||||
goto release_firmware;
|
||||
}
|
||||
|
||||
ptr = qproc->mpss_region + offset;
|
||||
|
||||
if (phdr->p_filesz) {
|
||||
snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
|
||||
ret = request_firmware(&seg_fw, seg_name, qproc->dev);
|
||||
if (ret) {
|
||||
dev_err(qproc->dev, "failed to load %s\n", seg_name);
|
||||
goto release_firmware;
|
||||
}
|
||||
|
||||
memcpy(ptr, seg_fw->data, seg_fw->size);
|
||||
|
||||
release_firmware(seg_fw);
|
||||
}
|
||||
|
||||
if (phdr->p_memsz > phdr->p_filesz) {
|
||||
memset(ptr + phdr->p_filesz, 0,
|
||||
phdr->p_memsz - phdr->p_filesz);
|
||||
}
|
||||
size += phdr->p_memsz;
|
||||
}
|
||||
|
||||
/* Transfer ownership of modem ddr region to q6 */
|
||||
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
|
||||
qproc->mpss_phys, qproc->mpss_size);
|
||||
if (ret) {
|
||||
dev_err(qproc->dev,
|
||||
"assigning Q6 access to mpss memory failed: %d\n", ret);
|
||||
ret = -EAGAIN;
|
||||
goto release_firmware;
|
||||
}
|
||||
|
||||
boot_addr = relocate ? qproc->mpss_phys : min_addr;
|
||||
writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
|
||||
writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
|
||||
writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
|
||||
|
||||
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_err(qproc->dev, "MPSS authentication timed out\n");
|
||||
else if (ret < 0)
|
||||
dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
|
||||
|
||||
release_firmware:
|
||||
release_firmware(fw);
|
||||
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
static int q6v5_start(struct rproc *rproc)
|
||||
{
|
||||
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
|
||||
int xfermemop_ret;
|
||||
int ret;
|
||||
|
||||
qcom_q6v5_prepare(&qproc->q6v5);
|
||||
|
||||
|
@ -859,34 +757,9 @@ static int q6v5_start(struct rproc *rproc)
|
|||
goto halt_axi_ports;
|
||||
}
|
||||
|
||||
dev_info(qproc->dev, "MBA booted, loading mpss\n");
|
||||
|
||||
ret = q6v5_mpss_load(qproc);
|
||||
if (ret)
|
||||
goto reclaim_mpss;
|
||||
|
||||
ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
|
||||
if (ret == -ETIMEDOUT) {
|
||||
dev_err(qproc->dev, "start timed out\n");
|
||||
goto reclaim_mpss;
|
||||
}
|
||||
|
||||
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
|
||||
qproc->mba_phys,
|
||||
qproc->mba_size);
|
||||
if (xfermemop_ret)
|
||||
dev_err(qproc->dev,
|
||||
"Failed to reclaim mba buffer system may become unstable\n");
|
||||
qproc->running = true;
|
||||
|
||||
qproc->dump_mba_loaded = true;
|
||||
return 0;
|
||||
|
||||
reclaim_mpss:
|
||||
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
|
||||
false, qproc->mpss_phys,
|
||||
qproc->mpss_size);
|
||||
WARN_ON(xfermemop_ret);
|
||||
|
||||
halt_axi_ports:
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
|
||||
|
@ -904,7 +777,6 @@ static int q6v5_start(struct rproc *rproc)
|
|||
disable_active_clks:
|
||||
q6v5_clk_disable(qproc->dev, qproc->active_clks,
|
||||
qproc->active_clk_count);
|
||||
|
||||
assert_reset:
|
||||
q6v5_reset_assert(qproc);
|
||||
disable_reset_clks:
|
||||
|
@ -919,24 +791,18 @@ static int q6v5_start(struct rproc *rproc)
|
|||
disable_proxy_reg:
|
||||
q6v5_regulator_disable(qproc, qproc->proxy_regs,
|
||||
qproc->proxy_reg_count);
|
||||
|
||||
disable_irqs:
|
||||
qcom_q6v5_unprepare(&qproc->q6v5);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int q6v5_stop(struct rproc *rproc)
|
||||
static void q6v5_mba_reclaim(struct q6v5 *qproc)
|
||||
{
|
||||
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
qproc->running = false;
|
||||
|
||||
ret = qcom_q6v5_request_stop(&qproc->q6v5);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_err(qproc->dev, "timed out on wait\n");
|
||||
qproc->dump_mba_loaded = false;
|
||||
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
|
||||
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
|
||||
|
@ -951,13 +817,28 @@ static int q6v5_stop(struct rproc *rproc)
|
|||
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
|
||||
}
|
||||
|
||||
|
||||
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
|
||||
qproc->mpss_phys, qproc->mpss_size);
|
||||
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
|
||||
false, qproc->mpss_phys,
|
||||
qproc->mpss_size);
|
||||
WARN_ON(ret);
|
||||
|
||||
q6v5_reset_assert(qproc);
|
||||
|
||||
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
|
||||
qproc->reset_clk_count);
|
||||
q6v5_clk_disable(qproc->dev, qproc->active_clks,
|
||||
qproc->active_clk_count);
|
||||
q6v5_regulator_disable(qproc, qproc->active_regs,
|
||||
qproc->active_reg_count);
|
||||
|
||||
/* In case of failure or coredump scenario where reclaiming MBA memory
|
||||
* could not happen reclaim it here.
|
||||
*/
|
||||
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
|
||||
qproc->mba_phys,
|
||||
qproc->mba_size);
|
||||
WARN_ON(ret);
|
||||
|
||||
ret = qcom_q6v5_unprepare(&qproc->q6v5);
|
||||
if (ret) {
|
||||
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
|
||||
|
@ -965,13 +846,208 @@ static int q6v5_stop(struct rproc *rproc)
|
|||
q6v5_regulator_disable(qproc, qproc->proxy_regs,
|
||||
qproc->proxy_reg_count);
|
||||
}
|
||||
}
|
||||
|
||||
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
|
||||
qproc->reset_clk_count);
|
||||
q6v5_clk_disable(qproc->dev, qproc->active_clks,
|
||||
qproc->active_clk_count);
|
||||
q6v5_regulator_disable(qproc, qproc->active_regs,
|
||||
qproc->active_reg_count);
|
||||
static int q6v5_mpss_load(struct q6v5 *qproc)
|
||||
{
|
||||
const struct elf32_phdr *phdrs;
|
||||
const struct elf32_phdr *phdr;
|
||||
const struct firmware *seg_fw;
|
||||
const struct firmware *fw;
|
||||
struct elf32_hdr *ehdr;
|
||||
phys_addr_t mpss_reloc;
|
||||
phys_addr_t boot_addr;
|
||||
phys_addr_t min_addr = PHYS_ADDR_MAX;
|
||||
phys_addr_t max_addr = 0;
|
||||
bool relocate = false;
|
||||
char seg_name[10];
|
||||
ssize_t offset;
|
||||
size_t size = 0;
|
||||
void *ptr;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
ret = request_firmware(&fw, "modem.mdt", qproc->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(qproc->dev, "unable to load modem.mdt\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Initialize the RMB validator */
|
||||
writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
|
||||
|
||||
ret = q6v5_mpss_init_image(qproc, fw);
|
||||
if (ret)
|
||||
goto release_firmware;
|
||||
|
||||
ehdr = (struct elf32_hdr *)fw->data;
|
||||
phdrs = (struct elf32_phdr *)(ehdr + 1);
|
||||
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &phdrs[i];
|
||||
|
||||
if (!q6v5_phdr_valid(phdr))
|
||||
continue;
|
||||
|
||||
if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
|
||||
relocate = true;
|
||||
|
||||
if (phdr->p_paddr < min_addr)
|
||||
min_addr = phdr->p_paddr;
|
||||
|
||||
if (phdr->p_paddr + phdr->p_memsz > max_addr)
|
||||
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
|
||||
}
|
||||
|
||||
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
|
||||
qproc->mpss_reloc = mpss_reloc;
|
||||
/* Load firmware segments */
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &phdrs[i];
|
||||
|
||||
if (!q6v5_phdr_valid(phdr))
|
||||
continue;
|
||||
|
||||
offset = phdr->p_paddr - mpss_reloc;
|
||||
if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
|
||||
dev_err(qproc->dev, "segment outside memory range\n");
|
||||
ret = -EINVAL;
|
||||
goto release_firmware;
|
||||
}
|
||||
|
||||
ptr = qproc->mpss_region + offset;
|
||||
|
||||
if (phdr->p_filesz) {
|
||||
snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
|
||||
ret = request_firmware(&seg_fw, seg_name, qproc->dev);
|
||||
if (ret) {
|
||||
dev_err(qproc->dev, "failed to load %s\n", seg_name);
|
||||
goto release_firmware;
|
||||
}
|
||||
|
||||
memcpy(ptr, seg_fw->data, seg_fw->size);
|
||||
|
||||
release_firmware(seg_fw);
|
||||
}
|
||||
|
||||
if (phdr->p_memsz > phdr->p_filesz) {
|
||||
memset(ptr + phdr->p_filesz, 0,
|
||||
phdr->p_memsz - phdr->p_filesz);
|
||||
}
|
||||
size += phdr->p_memsz;
|
||||
}
|
||||
|
||||
/* Transfer ownership of modem ddr region to q6 */
|
||||
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
|
||||
qproc->mpss_phys, qproc->mpss_size);
|
||||
if (ret) {
|
||||
dev_err(qproc->dev,
|
||||
"assigning Q6 access to mpss memory failed: %d\n", ret);
|
||||
ret = -EAGAIN;
|
||||
goto release_firmware;
|
||||
}
|
||||
|
||||
boot_addr = relocate ? qproc->mpss_phys : min_addr;
|
||||
writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
|
||||
writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
|
||||
writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
|
||||
|
||||
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_err(qproc->dev, "MPSS authentication timed out\n");
|
||||
else if (ret < 0)
|
||||
dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
|
||||
|
||||
release_firmware:
|
||||
release_firmware(fw);
|
||||
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
static void qcom_q6v5_dump_segment(struct rproc *rproc,
|
||||
struct rproc_dump_segment *segment,
|
||||
void *dest)
|
||||
{
|
||||
int ret = 0;
|
||||
struct q6v5 *qproc = rproc->priv;
|
||||
unsigned long mask = BIT((unsigned long)segment->priv);
|
||||
void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
|
||||
|
||||
/* Unlock mba before copying segments */
|
||||
if (!qproc->dump_mba_loaded)
|
||||
ret = q6v5_mba_load(qproc);
|
||||
|
||||
if (!ptr || ret)
|
||||
memset(dest, 0xff, segment->size);
|
||||
else
|
||||
memcpy(dest, ptr, segment->size);
|
||||
|
||||
qproc->dump_segment_mask |= mask;
|
||||
|
||||
/* Reclaim mba after copying segments */
|
||||
if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
|
||||
if (qproc->dump_mba_loaded)
|
||||
q6v5_mba_reclaim(qproc);
|
||||
}
|
||||
}
|
||||
|
||||
static int q6v5_start(struct rproc *rproc)
|
||||
{
|
||||
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
|
||||
int xfermemop_ret;
|
||||
int ret;
|
||||
|
||||
ret = q6v5_mba_load(qproc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev_info(qproc->dev, "MBA booted, loading mpss\n");
|
||||
|
||||
ret = q6v5_mpss_load(qproc);
|
||||
if (ret)
|
||||
goto reclaim_mpss;
|
||||
|
||||
ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
|
||||
if (ret == -ETIMEDOUT) {
|
||||
dev_err(qproc->dev, "start timed out\n");
|
||||
goto reclaim_mpss;
|
||||
}
|
||||
|
||||
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
|
||||
qproc->mba_phys,
|
||||
qproc->mba_size);
|
||||
if (xfermemop_ret)
|
||||
dev_err(qproc->dev,
|
||||
"Failed to reclaim mba buffer system may become unstable\n");
|
||||
|
||||
/* Reset Dump Segment Mask */
|
||||
qproc->dump_segment_mask = 0;
|
||||
qproc->running = true;
|
||||
|
||||
return 0;
|
||||
|
||||
reclaim_mpss:
|
||||
xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
|
||||
false, qproc->mpss_phys,
|
||||
qproc->mpss_size);
|
||||
WARN_ON(xfermemop_ret);
|
||||
q6v5_mba_reclaim(qproc);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int q6v5_stop(struct rproc *rproc)
|
||||
{
|
||||
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
|
||||
int ret;
|
||||
|
||||
qproc->running = false;
|
||||
|
||||
ret = qcom_q6v5_request_stop(&qproc->q6v5);
|
||||
if (ret == -ETIMEDOUT)
|
||||
dev_err(qproc->dev, "timed out on wait\n");
|
||||
|
||||
q6v5_mba_reclaim(qproc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -988,10 +1064,52 @@ static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
|
|||
return qproc->mpss_region + offset;
|
||||
}
|
||||
|
||||
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
|
||||
const struct firmware *mba_fw)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
const struct elf32_phdr *phdrs;
|
||||
const struct elf32_phdr *phdr;
|
||||
const struct elf32_hdr *ehdr;
|
||||
struct q6v5 *qproc = rproc->priv;
|
||||
unsigned long i;
|
||||
int ret;
|
||||
|
||||
ret = request_firmware(&fw, "modem.mdt", qproc->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(qproc->dev, "unable to load modem.mdt\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ehdr = (struct elf32_hdr *)fw->data;
|
||||
phdrs = (struct elf32_phdr *)(ehdr + 1);
|
||||
qproc->dump_complete_mask = 0;
|
||||
|
||||
for (i = 0; i < ehdr->e_phnum; i++) {
|
||||
phdr = &phdrs[i];
|
||||
|
||||
if (!q6v5_phdr_valid(phdr))
|
||||
continue;
|
||||
|
||||
ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
|
||||
phdr->p_memsz,
|
||||
qcom_q6v5_dump_segment,
|
||||
(void *)i);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
qproc->dump_complete_mask |= BIT(i);
|
||||
}
|
||||
|
||||
release_firmware(fw);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct rproc_ops q6v5_ops = {
|
||||
.start = q6v5_start,
|
||||
.stop = q6v5_stop,
|
||||
.da_to_va = q6v5_da_to_va,
|
||||
.parse_fw = qcom_q6v5_register_dump_segments,
|
||||
.load = q6v5_load,
|
||||
};
|
||||
|
||||
|
@ -1066,12 +1184,21 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks,
|
|||
static int q6v5_init_reset(struct q6v5 *qproc)
|
||||
{
|
||||
qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
|
||||
NULL);
|
||||
"mss_restart");
|
||||
if (IS_ERR(qproc->mss_restart)) {
|
||||
dev_err(qproc->dev, "failed to acquire mss restart\n");
|
||||
return PTR_ERR(qproc->mss_restart);
|
||||
}
|
||||
|
||||
if (qproc->has_alt_reset) {
|
||||
qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
|
||||
"pdc_reset");
|
||||
if (IS_ERR(qproc->pdc_reset)) {
|
||||
dev_err(qproc->dev, "failed to acquire pdc reset\n");
|
||||
return PTR_ERR(qproc->pdc_reset);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1132,6 +1259,9 @@ static int q6v5_probe(struct platform_device *pdev)
|
|||
if (!desc)
|
||||
return -EINVAL;
|
||||
|
||||
if (desc->need_mem_protection && !qcom_scm_is_available())
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
|
||||
desc->hexagon_mba_image, sizeof(*qproc));
|
||||
if (!rproc) {
|
||||
|
@ -1192,12 +1322,12 @@ static int q6v5_probe(struct platform_device *pdev)
|
|||
}
|
||||
qproc->active_reg_count = ret;
|
||||
|
||||
qproc->has_alt_reset = desc->has_alt_reset;
|
||||
ret = q6v5_init_reset(qproc);
|
||||
if (ret)
|
||||
goto free_rproc;
|
||||
|
||||
qproc->version = desc->version;
|
||||
qproc->has_alt_reset = desc->has_alt_reset;
|
||||
qproc->need_mem_protection = desc->need_mem_protection;
|
||||
|
||||
ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
|
||||
|
@ -1368,11 +1498,11 @@ static struct platform_driver q6v5_driver = {
|
|||
.probe = q6v5_probe,
|
||||
.remove = q6v5_remove,
|
||||
.driver = {
|
||||
.name = "qcom-q6v5-pil",
|
||||
.name = "qcom-q6v5-mss",
|
||||
.of_match_table = q6v5_of_match,
|
||||
},
|
||||
};
|
||||
module_platform_driver(q6v5_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
|
||||
MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
|
||||
MODULE_LICENSE("GPL v2");
|
|
@ -342,6 +342,16 @@ static const struct adsp_data adsp_resource_init = {
|
|||
.ssctl_id = 0x14,
|
||||
};
|
||||
|
||||
static const struct adsp_data cdsp_resource_init = {
|
||||
.crash_reason_smem = 601,
|
||||
.firmware_name = "cdsp.mdt",
|
||||
.pas_id = 18,
|
||||
.has_aggre2_clk = false,
|
||||
.ssr_name = "cdsp",
|
||||
.sysmon_name = "cdsp",
|
||||
.ssctl_id = 0x17,
|
||||
};
|
||||
|
||||
static const struct adsp_data slpi_resource_init = {
|
||||
.crash_reason_smem = 424,
|
||||
.firmware_name = "slpi.mdt",
|
||||
|
@ -352,10 +362,24 @@ static const struct adsp_data slpi_resource_init = {
|
|||
.ssctl_id = 0x16,
|
||||
};
|
||||
|
||||
static const struct adsp_data wcss_resource_init = {
|
||||
.crash_reason_smem = 421,
|
||||
.firmware_name = "wcnss.mdt",
|
||||
.pas_id = 6,
|
||||
.ssr_name = "mpss",
|
||||
.sysmon_name = "wcnss",
|
||||
.ssctl_id = 0x12,
|
||||
};
|
||||
|
||||
static const struct of_device_id adsp_of_match[] = {
|
||||
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
|
||||
{ .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
|
||||
{ .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
|
||||
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
|
||||
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
|
||||
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
|
||||
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
|
||||
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, adsp_of_match);
|
||||
|
@ -364,11 +388,11 @@ static struct platform_driver adsp_driver = {
|
|||
.probe = adsp_probe,
|
||||
.remove = adsp_remove,
|
||||
.driver = {
|
||||
.name = "qcom_adsp_pil",
|
||||
.name = "qcom_q6v5_pas",
|
||||
.of_match_table = adsp_of_match,
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(adsp_driver);
|
||||
MODULE_DESCRIPTION("Qualcomm MSM8974/MSM8996 ADSP Peripherial Image Loader");
|
||||
MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Authentication Service driver");
|
||||
MODULE_LICENSE("GPL v2");
|
|
@ -53,6 +53,11 @@ typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
|
|||
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
|
||||
void *, int offset, int avail);
|
||||
|
||||
static int rproc_alloc_carveout(struct rproc *rproc,
|
||||
struct rproc_mem_entry *mem);
|
||||
static int rproc_release_carveout(struct rproc *rproc,
|
||||
struct rproc_mem_entry *mem);
|
||||
|
||||
/* Unique indices for remoteproc devices */
|
||||
static DEFINE_IDA(rproc_dev_index);
|
||||
|
||||
|
@ -140,6 +145,22 @@ static void rproc_disable_iommu(struct rproc *rproc)
|
|||
iommu_domain_free(domain);
|
||||
}
|
||||
|
||||
static phys_addr_t rproc_va_to_pa(void *cpu_addr)
|
||||
{
|
||||
/*
|
||||
* Return physical address according to virtual address location
|
||||
* - in vmalloc: if region ioremapped or defined as dma_alloc_coherent
|
||||
* - in kernel: if region allocated in generic dma memory pool
|
||||
*/
|
||||
if (is_vmalloc_addr(cpu_addr)) {
|
||||
return page_to_phys(vmalloc_to_page(cpu_addr)) +
|
||||
offset_in_page(cpu_addr);
|
||||
}
|
||||
|
||||
WARN_ON(!virt_addr_valid(cpu_addr));
|
||||
return virt_to_phys(cpu_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
|
||||
* @rproc: handle of a remote processor
|
||||
|
@ -201,27 +222,128 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
|
|||
}
|
||||
EXPORT_SYMBOL(rproc_da_to_va);
|
||||
|
||||
/**
|
||||
* rproc_find_carveout_by_name() - lookup the carveout region by a name
|
||||
* @rproc: handle of a remote processor
|
||||
* @name,..: carveout name to find (standard printf format)
|
||||
*
|
||||
* Platform driver has the capability to register some pre-allacoted carveout
|
||||
* (physically contiguous memory regions) before rproc firmware loading and
|
||||
* associated resource table analysis. These regions may be dedicated memory
|
||||
* regions internal to the coprocessor or specified DDR region with specific
|
||||
* attributes
|
||||
*
|
||||
* This function is a helper function with which we can go over the
|
||||
* allocated carveouts and return associated region characteristics like
|
||||
* coprocessor address, length or processor virtual address.
|
||||
*
|
||||
* Return: a valid pointer on carveout entry on success or NULL on failure.
|
||||
*/
|
||||
struct rproc_mem_entry *
|
||||
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
|
||||
{
|
||||
va_list args;
|
||||
char _name[32];
|
||||
struct rproc_mem_entry *carveout, *mem = NULL;
|
||||
|
||||
if (!name)
|
||||
return NULL;
|
||||
|
||||
va_start(args, name);
|
||||
vsnprintf(_name, sizeof(_name), name, args);
|
||||
va_end(args);
|
||||
|
||||
list_for_each_entry(carveout, &rproc->carveouts, node) {
|
||||
/* Compare carveout and requested names */
|
||||
if (!strcmp(carveout->name, _name)) {
|
||||
mem = carveout;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return mem;
|
||||
}
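
Editor's note: the lookup above only pays off if a platform driver has
pre-registered a named carveout. Below is a minimal, hedged sketch of that
registration path; rproc_mem_entry_init(), rproc_add_carveout() and the
argument order follow the rproc_alloc_vring() call further down in this file,
while the mydsp_* names, "mydsp_mem" and the parameters are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/remoteproc.h>

/*
 * Sketch: register a region that the driver has already mapped as a
 * named carveout, before the remote processor is booted. No alloc or
 * release ops are given, since the memory is managed by the driver.
 */
static int mydsp_register_carveout(struct rproc *rproc, struct device *dev,
				   void *va, dma_addr_t dma, u32 da, int len)
{
	struct rproc_mem_entry *mem;

	mem = rproc_mem_entry_init(dev, va, dma, len, da, NULL, NULL,
				   "mydsp_mem");
	if (!mem)
		return -ENOMEM;

	rproc_add_carveout(rproc, mem);
	return 0;
}

The core (or, where the helper is visible, the driver itself) can then
retrieve the region with rproc_find_carveout_by_name(rproc, "mydsp_mem")
and read its da, len and va fields, as described in the kernel-doc above.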
|
||||
|
||||
/**
 * rproc_check_carveout_da() - check specified carveout da configuration
 * @rproc: handle of a remote processor
 * @mem: pointer to the carveout to check
 * @da: area device address
 * @len: associated area size
 *
 * This helper function verifies that the requested device area (da, len pair)
 * is part of the specified carveout.
 *
 * Return: 0 if the carveout matches the request, -ENOMEM otherwise
 */
int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem,
			    u32 da, u32 len)
{
	struct device *dev = &rproc->dev;
	int delta = 0;

	/* Check requested resource length */
	if (len > mem->len) {
		dev_err(dev, "Registered carveout doesn't fit len request\n");
		return -ENOMEM;
	}

	if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
		/* Update existing carveout da */
		mem->da = da;
	} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
		delta = da - mem->da;

		/* Check requested resource belongs to registered carveout */
		if (delta < 0) {
			dev_err(dev,
				"Registered carveout doesn't fit da request\n");
			return -ENOMEM;
		}

		if (delta + len > mem->len) {
			dev_err(dev,
				"Registered carveout doesn't fit len request\n");
			return -ENOMEM;
		}
	}

	return 0;
}

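A short worked example may help here; the values below are invented purely for illustration and assume a carveout registered with da 0x10000000 and len 0x40000.

/*
 * Illustration only (hypothetical values), for mem = { .da = 0x10000000,
 * .len = 0x40000 }:
 *
 *   rproc_check_carveout_da(rproc, mem, 0x10010000, 0x1000)  -> 0
 *	delta = 0x10000, delta + len = 0x11000 <= 0x40000
 *
 *   rproc_check_carveout_da(rproc, mem, 0x1003f000, 0x2000)  -> -ENOMEM
 *	delta = 0x3f000, delta + len = 0x41000 > 0x40000
 */
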
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	dma_addr_t dma;
	void *va;
	int ret, size, notifyid;
	struct rproc_mem_entry *mem;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

	/*
	 * Allocate non-cacheable memory for the vring. In the future
	 * this call will also configure the IOMMU for us
	 */
	va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent, "dma_alloc_coherent failed\n");
		return -EINVAL;
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;

	/* Search for pre-registered carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  i);
	if (mem) {
		if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
			return -ENOMEM;
	} else {
		/* Register carveout in list */
		mem = rproc_mem_entry_init(dev, 0, 0, size, rsc->vring[i].da,
					   rproc_alloc_carveout,
					   rproc_release_carveout,
					   "vdev%dvring%d",
					   rvdev->index, i);
		if (!mem) {
			dev_err(dev, "Can't allocate memory entry structure\n");
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, mem);
	}

	/*

@@ -232,7 +354,6 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		dma_free_coherent(dev->parent, size, va, dma);
		return ret;
	}
	notifyid = ret;

@@ -241,21 +362,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
	if (notifyid > rproc->max_notifyid)
		rproc->max_notifyid = notifyid;

	dev_dbg(dev, "vring%d: va %pK dma %pad size 0x%x idr %d\n",
		i, va, &dma, size, notifyid);

	rvring->va = va;
	rvring->dma = dma;
	rvring->notifyid = notifyid;

	/*
	 * Let the rproc know the notifyid and da of this vring.
	 * Not all platforms use dma_alloc_coherent to automatically
	 * set up the iommu. In this case the device address (da) will
	 * hold the physical address and not the device address.
	 */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[i].da = dma;
	/* Let the rproc know the notifyid of this vring.*/
	rsc->vring[i].notifyid = notifyid;
	return 0;
}

@@ -287,12 +396,10 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)

void rproc_free_vring(struct rproc_vring *rvring)
{
	int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
	struct rproc *rproc = rvring->rvdev->rproc;
	int idx = rvring - rvring->rvdev->vring;
	struct fw_rsc_vdev *rsc;

	dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
	idr_remove(&rproc->notifyids, rvring->notifyid);

	/* reset resource entry info */

@@ -379,6 +486,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,

	rvdev->id = rsc->id;
	rvdev->rproc = rproc;
	rvdev->index = rproc->nb_vdev++;

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {

@@ -423,9 +531,6 @@ void rproc_vdev_release(struct kref *ref)

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		if (!rvring->va)
			continue;

		rproc_free_vring(rvring);
	}

@@ -583,6 +688,119 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
	return ret;
}

/**
 * rproc_alloc_carveout() - allocate specified carveout
 * @rproc: rproc handle
 * @mem: the memory entry to allocate
 *
 * This function allocates specified memory entry @mem using
 * dma_alloc_coherent() as default allocator
 */
static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem)
{
	struct rproc_mem_entry *mapping = NULL;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%x\n", mem->len);
		return -ENOMEM;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
		va, &dma, mem->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */

	if (mem->da != FW_RSC_ADDR_ANY) {
		if (!rproc->domain) {
			dev_err(dev->parent,
				"Bad carveout rsc configuration\n");
			ret = -ENOMEM;
			goto dma_free;
		}

		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
				mem->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = mem->da;
		mapping->len = mem->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			mem->da, &dma);
	} else {
		mem->da = (u32)dma;
	}

	mem->dma = (u32)dma;
	mem->va = va;

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, mem->len, va, dma);
	return ret;
}

/**
 * rproc_release_carveout() - release acquired carveout
 * @rproc: rproc handle
 * @mem: the memory entry to release
 *
 * This function releases specified memory entry @mem allocated via
 * rproc_alloc_carveout() function by @rproc.
 */
static int rproc_release_carveout(struct rproc *rproc,
				  struct rproc_mem_entry *mem)
{
	struct device *dev = &rproc->dev;

	/* clean up carveout allocations */
	dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
	return 0;
}

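Since carveouts now carry their own alloc/release ops, a platform driver can substitute something other than dma_alloc_coherent(). The following is a hedged sketch of an ioremap-based pair for a fixed on-chip region; the mydsp_* names are hypothetical, and mem->dma is assumed to hold the fixed physical address supplied by the driver when it created the entry.

static int mydsp_internal_mem_alloc(struct rproc *rproc,
				    struct rproc_mem_entry *mem)
{
	void __iomem *va;

	/* Map the fixed region instead of allocating DMA memory */
	va = ioremap_wc(mem->dma, mem->len);
	if (!va)
		return -ENOMEM;

	/* Publish the CPU mapping so the core can load/copy into it */
	mem->va = (__force void *)va;

	return 0;
}

static int mydsp_internal_mem_release(struct rproc *rproc,
				      struct rproc_mem_entry *mem)
{
	iounmap((__force void __iomem *)mem->va);

	return 0;
}
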
/**
 * rproc_handle_carveout() - handle phys contig memory allocation requests
 * @rproc: rproc handle

@@ -605,11 +823,8 @@ static int rproc_handle_carveout(struct rproc *rproc,
				 struct fw_rsc_carveout *rsc,
				 int offset, int avail)
{
	struct rproc_mem_entry *carveout, *mapping;
	struct rproc_mem_entry *carveout;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");

@@ -625,105 +840,140 @@ static int rproc_handle_carveout(struct rproc *rproc,
	dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);

	carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
	if (!carveout)
	/*
	 * Check if the carveout rsc is already part of a registered carveout;
	 * search by name, then check the da and length
	 */
	carveout = rproc_find_carveout_by_name(rproc, rsc->name);

	if (carveout) {
		if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
			dev_err(dev,
				"Carveout already associated to resource table\n");
			return -ENOMEM;
		}

		if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
			return -ENOMEM;

		/* Update memory carveout with resource table info */
		carveout->rsc_offset = offset;
		carveout->flags = rsc->flags;

		return 0;
	}

	/* Register carveout in list */
	carveout = rproc_mem_entry_init(dev, 0, 0, rsc->len, rsc->da,
					rproc_alloc_carveout,
					rproc_release_carveout, rsc->name);
	if (!carveout) {
		dev_err(dev, "Can't allocate memory entry structure\n");
		return -ENOMEM;

	va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%x\n", rsc->len);
		ret = -ENOMEM;
		goto free_carv;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
		va, &dma, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
				rsc->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = rsc->da;
		mapping->len = rsc->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			rsc->da, &dma);
	}

	/*
	 * Some remote processors might need to know the pa
	 * even though they are behind an IOMMU. E.g., OMAP4's
	 * remote M3 processor needs this so it can control
	 * on-chip hardware accelerators that are not behind
	 * the IOMMU, and therefore must know the pa.
	 *
	 * Generally we don't want to expose physical addresses
	 * if we don't have to (remote processors are generally
	 * _not_ trusted), so we might want to do this only for
	 * remote processor that _must_ have this (e.g. OMAP4's
	 * dual M3 subsystem).
	 *
	 * Non-IOMMU processors might also want to have this info.
	 * In this case, the device address and the physical address
	 * are the same.
	 */
	rsc->pa = dma;

	carveout->va = va;
	carveout->len = rsc->len;
	carveout->dma = dma;
	carveout->da = rsc->da;

	list_add_tail(&carveout->node, &rproc->carveouts);
	carveout->flags = rsc->flags;
	carveout->rsc_offset = offset;
	rproc_add_carveout(rproc, carveout);

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, rsc->len, va, dma);
free_carv:
	kfree(carveout);
	return ret;
}

/*
/**
 * rproc_add_carveout() - register an allocated carveout region
 * @rproc: rproc handle
 * @mem: memory entry to register
 *
 * This function registers specified memory entry in @rproc carveouts list.
 * Specified carveout should have been allocated before registering.
 */
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	list_add_tail(&mem->node, &rproc->carveouts);
}
EXPORT_SYMBOL(rproc_add_carveout);

/**
 * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct
 * @dev: pointer to device struct
 * @va: virtual address
 * @dma: dma address
 * @len: memory carveout length
 * @da: device address
 * @alloc: memory carveout allocation function
 * @release: memory carveout release function
 * @name: carveout name
 *
 * This function allocates a rproc_mem_entry struct and fills it with
 * parameters provided by client.
 */
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
		     void *va, dma_addr_t dma, int len, u32 da,
		     int (*alloc)(struct rproc *, struct rproc_mem_entry *),
		     int (*release)(struct rproc *, struct rproc_mem_entry *),
		     const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->va = va;
	mem->dma = dma;
	mem->da = da;
	mem->len = len;
	mem->alloc = alloc;
	mem->release = release;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = -1;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_mem_entry_init);

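As a hedged sketch of the intended client flow (device, addresses and the "mydsp-sram" carveout name are hypothetical, and the region is assumed to be directly addressable, i.e. no IOMMU so da == pa): a driver can wrap a statically mapped region in an entry with no alloc/release ops and hand it to the core before boot.

#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/remoteproc.h>

static int mydsp_register_sram(struct rproc *rproc, struct device *dev)
{
	struct rproc_mem_entry *mem;
	void *va;

	/* Map a fixed on-chip SRAM region already owned by the driver */
	va = devm_memremap(dev, 0x10040000, SZ_256K, MEMREMAP_WC);
	if (IS_ERR(va))
		return PTR_ERR(va);

	/* No alloc/release ops: the region is static, the core only reuses it */
	mem = rproc_mem_entry_init(dev, va, 0x10040000, SZ_256K, 0x10040000,
				   NULL, NULL, "mydsp-sram");
	if (!mem)
		return -ENOMEM;

	rproc_add_carveout(rproc, mem);

	return 0;
}
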
/**
 * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct
 * from a reserved memory phandle
 * @dev: pointer to device struct
 * @of_resm_idx: reserved memory phandle index in "memory-region"
 * @len: memory carveout length
 * @da: device address
 * @name: carveout name
 *
 * This function allocates a rproc_mem_entry struct and fills it with
 * parameters provided by client.
 */
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
			     u32 da, const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->da = da;
	mem->len = len;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = of_resm_idx;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);

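A hedged sketch of how a platform driver might feed a DT reserved-memory region into this helper before boot; the "memory-region"/"memory-region-names" properties, the "vdev0buffer" carveout name and the mydsp_* helper are assumptions for illustration, not something mandated by this series.

#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>

static int mydsp_add_resm_carveout(struct rproc *rproc, struct device *dev)
{
	struct device_node *node;
	struct rproc_mem_entry *mem;
	struct reserved_mem *rmem;
	int idx;

	idx = of_property_match_string(dev->of_node, "memory-region-names",
				       "vdev0buffer");
	if (idx < 0)
		return idx;

	node = of_parse_phandle(dev->of_node, "memory-region", idx);
	if (!node)
		return -ENODEV;

	rmem = of_reserved_mem_lookup(node);
	of_node_put(node);
	if (!rmem)
		return -EINVAL;

	/* da is assumed to equal the physical base (no IOMMU in this sketch) */
	mem = rproc_of_resm_mem_entry_init(dev, idx, rmem->size, rmem->base,
					   "vdev0buffer");
	if (!mem)
		return -ENOMEM;

	rproc_add_carveout(rproc, mem);

	return 0;
}
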
/**
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 */

@@ -844,6 +1094,70 @@ static void rproc_unprepare_subdevices(struct rproc *rproc)
	}
}

/**
 * rproc_alloc_registered_carveouts() - allocate all carveouts registered
 * in the list
 * @rproc: the remote processor handle
 *
 * This function parses the registered carveout list, performs allocation
 * if an alloc() op is registered, and updates the resource table information
 * if rsc_offset is set.
 *
 * Return: 0 on success
 */
static int rproc_alloc_registered_carveouts(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct fw_rsc_carveout *rsc;
	struct device *dev = &rproc->dev;
	int ret;

	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->alloc) {
			ret = entry->alloc(rproc, entry);
			if (ret) {
				dev_err(dev, "Unable to allocate carveout %s: %d\n",
					entry->name, ret);
				return -ENOMEM;
			}
		}

		if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
			/* update resource table */
			rsc = (void *)rproc->table_ptr + entry->rsc_offset;

			/*
			 * Some remote processors might need to know the pa
			 * even though they are behind an IOMMU. E.g., OMAP4's
			 * remote M3 processor needs this so it can control
			 * on-chip hardware accelerators that are not behind
			 * the IOMMU, and therefore must know the pa.
			 *
			 * Generally we don't want to expose physical addresses
			 * if we don't have to (remote processors are generally
			 * _not_ trusted), so we might want to do this only for
			 * remote processor that _must_ have this (e.g. OMAP4's
			 * dual M3 subsystem).
			 *
			 * Non-IOMMU processors might also want to have this info.
			 * In this case, the device address and the physical address
			 * are the same.
			 */

			/* Use va if defined else dma to generate pa */
			if (entry->va)
				rsc->pa = (u32)rproc_va_to_pa(entry->va);
			else
				rsc->pa = (u32)entry->dma;

			rsc->da = entry->da;
			rsc->len = entry->len;
		}
	}

	return 0;
}

/**
 * rproc_coredump_cleanup() - clean up dump_segments list
 * @rproc: the remote processor handle

@@ -896,8 +1210,8 @@ static void rproc_resource_cleanup(struct rproc *rproc)

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		dma_free_coherent(dev->parent, entry->len, entry->va,
				  entry->dma);
		if (entry->release)
			entry->release(rproc, entry);
		list_del(&entry->node);
		kfree(entry);
	}

@@ -1009,6 +1323,9 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
	/* reset max_notifyid */
	rproc->max_notifyid = -1;

	/* reset handled vdev */
	rproc->nb_vdev = 0;

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
	if (ret) {

@@ -1016,6 +1333,14 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
		goto clean_up_resources;
	}

	/* Allocate carveout resources associated to rproc */
	ret = rproc_alloc_registered_carveouts(rproc);
	if (ret) {
		dev_err(dev, "Failed to allocate associated carveouts: %d\n",
			ret);
		goto clean_up_resources;
	}

	ret = rproc_start(rproc, fw);
	if (ret)
		goto clean_up_resources;

@@ -1121,6 +1446,44 @@ int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
}
EXPORT_SYMBOL(rproc_coredump_add_segment);

/**
 * rproc_coredump_add_custom_segment() - add custom coredump segment
 * @rproc: handle of a remote processor
 * @da: device address
 * @size: size of segment
 * @dumpfn: custom dump function called for each segment during coredump
 * @priv: private data
 *
 * Add device memory to the list of segments to be included in the coredump
 * and associate the segment with the given custom dump function and private
 * data.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_custom_segment(struct rproc *rproc,
				      dma_addr_t da, size_t size,
				      void (*dumpfn)(struct rproc *rproc,
						     struct rproc_dump_segment *segment,
						     void *dest),
				      void *priv)
{
	struct rproc_dump_segment *segment;

	segment = kzalloc(sizeof(*segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	segment->da = da;
	segment->size = size;
	segment->priv = priv;
	segment->dump = dumpfn;

	list_add_tail(&segment->node, &rproc->dump_segments);

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_custom_segment);

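A hedged sketch of how a driver (for example, along the lines of what the Qualcomm modem driver does in this series) might register a custom dump callback; mydsp_priv, its fields and the address arithmetic are hypothetical.

struct mydsp_priv {
	void __iomem *region;	/* driver-private mapping of the dump area */
	u32 region_da;		/* device address the mapping starts at */
};

static void mydsp_dump_segment(struct rproc *rproc,
			       struct rproc_dump_segment *segment, void *dest)
{
	struct mydsp_priv *priv = segment->priv;
	size_t offset = segment->da - priv->region_da;

	/* Fill the coredump buffer from the driver's own mapping */
	memcpy_fromio(dest, priv->region + offset, segment->size);
}

static int mydsp_add_dump_segments(struct rproc *rproc, struct mydsp_priv *priv,
				   dma_addr_t da, size_t size)
{
	return rproc_coredump_add_custom_segment(rproc, da, size,
						 mydsp_dump_segment, priv);
}
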
/**
 * rproc_coredump() - perform coredump
 * @rproc: rproc handle

@@ -1183,14 +1546,18 @@ static void rproc_coredump(struct rproc *rproc)
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = 0;

		ptr = rproc_da_to_va(rproc, segment->da, segment->size);
		if (!ptr) {
			dev_err(&rproc->dev,
				"invalid coredump segment (%pad, %zu)\n",
				&segment->da, segment->size);
			memset(data + offset, 0xff, segment->size);
		if (segment->dump) {
			segment->dump(rproc, segment, data + offset);
		} else {
			memcpy(data + offset, ptr, segment->size);
			ptr = rproc_da_to_va(rproc, segment->da, segment->size);
			if (!ptr) {
				dev_err(&rproc->dev,
					"invalid coredump segment (%pad, %zu)\n",
					&segment->da, segment->size);
				memset(data + offset, 0xff, segment->size);
			} else {
				memcpy(data + offset, ptr, segment->size);
			}
		}

		offset += phdr->p_filesz;

@@ -260,6 +260,7 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p)

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		seq_puts(seq, "Carveout memory entry:\n");
		seq_printf(seq, "\tName: %s\n", carveout->name);
		seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
		seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
		seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);

@@ -60,6 +60,8 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
						       const struct firmware *fw);
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);

static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)

@@ -48,6 +48,11 @@ static ssize_t firmware_store(struct device *dev,
	}

	len = strcspn(buf, "\n");
	if (!len) {
		dev_err(dev, "can't provide a NULL firmware\n");
		err = -EINVAL;
		goto out;
	}

	p = kstrndup(buf, len, GFP_KERNEL);
	if (!p) {

@@ -76,7 +76,9 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int len, size;

@@ -88,8 +90,14 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = rvring->va;
	addr = mem->va;
	len = rvring->len;

	/* zero vring */

@@ -114,6 +122,10 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

@@ -305,14 +305,22 @@ struct fw_rsc_vdev {
	struct fw_rsc_vdev_vring vring[0];
} __packed;

struct rproc;

/**
 * struct rproc_mem_entry - memory entry descriptor
 * @va: virtual address
 * @dma: dma address
 * @len: length, in bytes
 * @da: device address
 * @release: release associated memory
 * @priv: associated data
 * @name: associated memory region name (optional)
 * @node: list node
 * @rsc_offset: offset in resource table
 * @flags: iommu protection flags
 * @of_resm_idx: reserved memory phandle index
 * @alloc: specific memory allocator function
 */
struct rproc_mem_entry {
	void *va;

@@ -320,10 +328,15 @@ struct rproc_mem_entry {
	int len;
	u32 da;
	void *priv;
	char name[32];
	struct list_head node;
	u32 rsc_offset;
	u32 flags;
	u32 of_resm_idx;
	int (*alloc)(struct rproc *rproc, struct rproc_mem_entry *mem);
	int (*release)(struct rproc *rproc, struct rproc_mem_entry *mem);
};

struct rproc;
struct firmware;

/**

@@ -399,6 +412,9 @@ enum rproc_crash_type {
 * @node: list node related to the rproc segment list
 * @da: device address of the segment
 * @size: size of the segment
 * @priv: private data associated with the dump_segment
 * @dump: custom dump function to fill device memory segment associated
 * with coredump
 */
struct rproc_dump_segment {
	struct list_head node;

@@ -406,6 +422,9 @@ struct rproc_dump_segment {
	dma_addr_t da;
	size_t size;

	void *priv;
	void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
		     void *dest);
	loff_t offset;
};

@@ -439,7 +458,9 @@ struct rproc_dump_segment {
 * @cached_table: copy of the resource table
 * @table_sz: size of @cached_table
 * @has_iommu: flag to indicate if remote processor is behind an MMU
 * @auto_boot: flag to indicate if remote processor should be auto-started
 * @dump_segments: list of segments in the firmware
 * @nb_vdev: number of vdev currently handled by rproc
 */
struct rproc {
	struct list_head node;

@@ -472,6 +493,7 @@ struct rproc {
	bool has_iommu;
	bool auto_boot;
	struct list_head dump_segments;
	int nb_vdev;
};

/**

@@ -499,7 +521,6 @@ struct rproc_subdev {
/**
 * struct rproc_vring - remoteproc vring state
 * @va: virtual address
 * @dma: dma address
 * @len: length, in bytes
 * @da: device address
 * @align: vring alignment

@@ -509,7 +530,6 @@ struct rproc_subdev {
 */
struct rproc_vring {
	void *va;
	dma_addr_t dma;
	int len;
	u32 da;
	u32 align;

@@ -528,6 +548,7 @@ struct rproc_vring {
 * @vdev: the virtio device
 * @vring: the vrings for this vdev
 * @rsc_offset: offset of the vdev's resource entry
 * @index: vdev position versus other vdev declared in resource table
 */
struct rproc_vdev {
	struct kref refcount;

@@ -540,6 +561,7 @@ struct rproc_vdev {
	struct virtio_device vdev;
	struct rproc_vring vring[RVDEV_NUM_VRINGS];
	u32 rsc_offset;
	u32 index;
};

struct rproc *rproc_get_by_phandle(phandle phandle);

@@ -553,10 +575,29 @@ int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);

void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);

struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
		     void *va, dma_addr_t dma, int len, u32 da,
		     int (*alloc)(struct rproc *, struct rproc_mem_entry *),
		     int (*release)(struct rproc *, struct rproc_mem_entry *),
		     const char *name, ...);

struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
			     u32 da, const char *name, ...);

int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
				      dma_addr_t da, size_t size,
				      void (*dumpfn)(struct rproc *rproc,
						     struct rproc_dump_segment *segment,
						     void *dest),
				      void *priv);

static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{