Diffstat (limited to 'drivers/remoteproc')
-rw-r--r--  drivers/remoteproc/mtk_common.h            |   3
-rw-r--r--  drivers/remoteproc/mtk_scp.c               |  90
-rw-r--r--  drivers/remoteproc/qcom_q6v5.c             |  21
-rw-r--r--  drivers/remoteproc/qcom_q6v5.h             |   3
-rw-r--r--  drivers/remoteproc/qcom_q6v5_adsp.c        | 228
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c         |  19
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c            |   1
-rw-r--r--  drivers/remoteproc/remoteproc_cdev.c       |   2
-rw-r--r--  drivers/remoteproc/remoteproc_core.c       |   9
-rw-r--r--  drivers/remoteproc/remoteproc_debugfs.c    |   2
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h   |   1
-rw-r--r--  drivers/remoteproc/remoteproc_sysfs.c      |  21
-rw-r--r--  drivers/remoteproc/ti_k3_dsp_remoteproc.c  | 208
-rw-r--r--  drivers/remoteproc/ti_k3_r5_remoteproc.c   | 287
-rw-r--r--  drivers/remoteproc/wkup_m3_rproc.c         |   1
15 files changed, 758 insertions, 138 deletions
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index 5ff3867c72f3..71ce4977cb0b 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -32,6 +32,9 @@
#define MT8183_SCP_CACHESIZE_8KB BIT(8)
#define MT8183_SCP_CACHE_CON_WAYEN BIT(10)
+#define MT8186_SCP_L1_SRAM_PD_P1 0x40B0
+#define MT8186_SCP_L1_SRAM_PD_p2 0x40B4
+
#define MT8192_L2TCM_SRAM_PD_0 0x10C0
#define MT8192_L2TCM_SRAM_PD_1 0x10C4
#define MT8192_L2TCM_SRAM_PD_2 0x10C8
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 36e48cf58ed6..38609153bf64 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -383,6 +383,27 @@ static void mt8192_power_off_sram(void __iomem *addr)
writel(GENMASK(i, 0), addr);
}
+static int mt8186_scp_before_load(struct mtk_scp *scp)
+{
+ /* Clear SCP to host interrupt */
+ writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+ /* Reset clocks before loading FW */
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+ /* Turn on the power of SCP's SRAM before using it. Enable one block at a time. */
+ mt8192_power_on_sram(scp->reg_base + MT8183_SCP_SRAM_PDN);
+
+ /* Initialize TCM before loading FW. */
+ writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
+ writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+
+ return 0;
+}
+
static int mt8192_scp_before_load(struct mtk_scp *scp)
{
/* clear SPM interrupt, SCP2SPM_IPC_CLR */
@@ -756,15 +777,9 @@ static int scp_probe(struct platform_device *pdev)
char *fw_name = "scp.img";
int ret, i;
- rproc = rproc_alloc(dev,
- np->name,
- &scp_ops,
- fw_name,
- sizeof(*scp));
- if (!rproc) {
- dev_err(dev, "unable to allocate remoteproc\n");
- return -ENOMEM;
- }
+ rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
+ if (!rproc)
+ return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
scp = (struct mtk_scp *)rproc->priv;
scp->rproc = rproc;
@@ -774,46 +789,42 @@ static int scp_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
scp->sram_base = devm_ioremap_resource(dev, res);
- if (IS_ERR((__force void *)scp->sram_base)) {
- dev_err(dev, "Failed to parse and map sram memory\n");
- ret = PTR_ERR((__force void *)scp->sram_base);
- goto free_rproc;
- }
+ if (IS_ERR(scp->sram_base))
+ return dev_err_probe(dev, PTR_ERR(scp->sram_base),
+ "Failed to parse and map sram memory\n");
+
scp->sram_size = resource_size(res);
scp->sram_phys = res->start;
/* l1tcm is an optional memory region */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
scp->l1tcm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR((__force void *)scp->l1tcm_base)) {
- ret = PTR_ERR((__force void *)scp->l1tcm_base);
+ if (IS_ERR(scp->l1tcm_base)) {
+ ret = PTR_ERR(scp->l1tcm_base);
if (ret != -EINVAL) {
- dev_err(dev, "Failed to map l1tcm memory\n");
- goto free_rproc;
+ return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
}
} else {
scp->l1tcm_size = resource_size(res);
scp->l1tcm_phys = res->start;
}
- mutex_init(&scp->send_lock);
- for (i = 0; i < SCP_IPI_MAX; i++)
- mutex_init(&scp->ipi_desc[i].lock);
-
scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
- if (IS_ERR((__force void *)scp->reg_base)) {
- dev_err(dev, "Failed to parse and map cfg memory\n");
- ret = PTR_ERR((__force void *)scp->reg_base);
- goto destroy_mutex;
- }
+ if (IS_ERR(scp->reg_base))
+ return dev_err_probe(dev, PTR_ERR(scp->reg_base),
+ "Failed to parse and map cfg memory\n");
- ret = scp_map_memory_region(scp);
+ ret = scp->data->scp_clk_get(scp);
if (ret)
- goto destroy_mutex;
+ return ret;
- ret = scp->data->scp_clk_get(scp);
+ ret = scp_map_memory_region(scp);
if (ret)
- goto release_dev_mem;
+ return ret;
+
+ mutex_init(&scp->send_lock);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_init(&scp->ipi_desc[i].lock);
/* register SCP initialization IPI */
ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
@@ -847,12 +858,9 @@ remove_subdev:
scp_ipi_unregister(scp, SCP_IPI_INIT);
release_dev_mem:
scp_unmap_memory_region(scp);
-destroy_mutex:
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
mutex_destroy(&scp->send_lock);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -887,6 +895,19 @@ static const struct mtk_scp_of_data mt8183_of_data = {
.ipi_buf_offset = 0x7bdb0,
};
+static const struct mtk_scp_of_data mt8186_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8186_scp_before_load,
+ .scp_irq_handler = mt8183_scp_irq_handler,
+ .scp_reset_assert = mt8183_scp_reset_assert,
+ .scp_reset_deassert = mt8183_scp_reset_deassert,
+ .scp_stop = mt8183_scp_stop,
+ .scp_da_to_va = mt8183_scp_da_to_va,
+ .host_to_scp_reg = MT8183_HOST_TO_SCP,
+ .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+ .ipi_buf_offset = 0x7bdb0,
+};
+
static const struct mtk_scp_of_data mt8192_of_data = {
.scp_clk_get = mt8192_scp_clk_get,
.scp_before_load = mt8192_scp_before_load,
@@ -913,6 +934,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
+ { .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
{},
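The scp_probe() conversion above replaces the manual rproc_alloc()/rproc_free() pairing with devm_rproc_alloc() and reports failures through dev_err_probe(), which is what lets the free_rproc and destroy_mutex error labels disappear. A minimal, hypothetical probe skeleton showing that pattern in isolation is sketched below; the example_* names are illustrative only, and devm_rproc_add() is used here for brevity even though mtk_scp.c itself still calls rproc_add() with explicit unwinding.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>

struct example_priv { void __iomem *reg_base; };	/* illustrative only */

static const struct rproc_ops example_rproc_ops = {
	/* .start/.stop/.load elided for brevity */
};

static int example_rproc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct example_priv *priv;
	struct rproc *rproc;

	/* devm_rproc_alloc(): freed automatically on probe failure or unbind */
	rproc = devm_rproc_alloc(dev, dev_name(dev), &example_rproc_ops,
				 "example_fw.img", sizeof(*priv));
	if (!rproc)
		return dev_err_probe(dev, -ENOMEM,
				     "unable to allocate remoteproc\n");

	priv = rproc->priv;
	priv->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
	if (IS_ERR(priv->reg_base))
		/* logs the error (quietly defers on -EPROBE_DEFER) and returns it */
		return dev_err_probe(dev, PTR_ERR(priv->reg_base),
				     "Failed to parse and map cfg memory\n");

	return devm_rproc_add(dev, rproc);
}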
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 442a388f8102..5280ec9b5449 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/soc/qcom/qcom_aoss.h>
@@ -51,9 +52,17 @@ int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5)
{
int ret;
+ ret = icc_set_bw(q6v5->path, 0, UINT_MAX);
+ if (ret < 0) {
+ dev_err(q6v5->dev, "failed to set bandwidth request\n");
+ return ret;
+ }
+
ret = q6v5_load_state_toggle(q6v5, true);
- if (ret)
+ if (ret) {
+ icc_set_bw(q6v5->path, 0, 0);
return ret;
+ }
reinit_completion(&q6v5->start_done);
reinit_completion(&q6v5->stop_done);
@@ -78,6 +87,9 @@ int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5)
disable_irq(q6v5->handover_irq);
q6v5_load_state_toggle(q6v5, false);
+ /* Disable interconnect vote, in case handover never happened */
+ icc_set_bw(q6v5->path, 0, 0);
+
return !q6v5->handover_issued;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_unprepare);
@@ -160,6 +172,8 @@ static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
if (q6v5->handover)
q6v5->handover(q6v5);
+ icc_set_bw(q6v5->path, 0, 0);
+
q6v5->handover_issued = true;
return IRQ_HANDLED;
@@ -332,6 +346,11 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
return load_state ? -ENOMEM : -EINVAL;
}
+ q6v5->path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(q6v5->path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->path),
+ "failed to acquire interconnect path\n");
+
return 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_init);
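The qcom_q6v5.c changes above vote for interconnect bandwidth on the proxy path before the Hexagon core is released, and drop the vote either in the handover interrupt (once the firmware can vote for itself) or in qcom_q6v5_unprepare() as a fallback. A hedged sketch of that acquire/vote/release lifecycle, using only the interconnect calls shown in the diff (the proxy_icc_* helper names are illustrative, not part of the qcom_q6v5 API):

#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/limits.h>

static struct icc_path *proxy_icc_get(struct device *dev)
{
	/* first "interconnects" entry from DT; NULL when none is described */
	return devm_of_icc_get(dev, NULL);
}

static int proxy_icc_vote(struct device *dev, struct icc_path *path)
{
	int ret;

	/* request maximum peak bandwidth while the remote processor boots */
	ret = icc_set_bw(path, 0, UINT_MAX);
	if (ret < 0)
		dev_err(dev, "failed to set bandwidth request\n");

	return ret;
}

static void proxy_icc_unvote(struct icc_path *path)
{
	/* dropped at handover (firmware votes for itself) or at unprepare */
	icc_set_bw(path, 0, 0);
}

As far as I can tell, icc_set_bw() treats a NULL path as a no-op, so platforms whose device tree describes no interconnect path should be unaffected by the new votes.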
diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h
index f35e04471ed7..5a859c41896e 100644
--- a/drivers/remoteproc/qcom_q6v5.h
+++ b/drivers/remoteproc/qcom_q6v5.h
@@ -7,6 +7,7 @@
#include <linux/completion.h>
#include <linux/soc/qcom/qcom_aoss.h>
+struct icc_path;
struct rproc;
struct qcom_smem_state;
struct qcom_sysmon;
@@ -18,6 +19,8 @@ struct qcom_q6v5 {
struct qcom_smem_state *state;
struct qmp *qmp;
+ struct icc_path *path;
+
unsigned stop_bit;
int wdog_irq;
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 098362e6e233..2f3b9f54251e 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -32,6 +32,7 @@
/* time out value */
#define ACK_TIMEOUT 1000
+#define ACK_TIMEOUT_US 1000000
#define BOOT_FSM_TIMEOUT 10000
/* mask values */
#define EVB_MASK GENMASK(27, 4)
@@ -51,6 +52,8 @@
#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP_CBCR 0x3c
+#define QCOM_Q6V5_RPROC_PROXY_PD_MAX 3
+
struct adsp_pil_data {
int crash_reason_smem;
const char *firmware_name;
@@ -58,9 +61,13 @@ struct adsp_pil_data {
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
+ bool is_wpss;
+ bool auto_boot;
const char **clk_ids;
int num_clks;
+ const char **proxy_pd_names;
+ const char *load_state;
};
struct qcom_adsp {
@@ -93,11 +100,151 @@ struct qcom_adsp {
void *mem_region;
size_t mem_size;
+ struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX];
+ size_t proxy_pd_count;
+
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
+
+ int (*shutdown)(struct qcom_adsp *adsp);
};
+static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp,
+ const char **pd_names)
+{
+ struct device **devs = adsp->proxy_pds;
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ if (num_pds > ARRAY_SIZE(adsp->proxy_pds))
+ return -E2BIG;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(devs[i])) {
+ ret = PTR_ERR(devs[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+}
+
+static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ struct device *dev = adsp->dev;
+ int i;
+
+ /* Handle single power domain */
+ if (dev->pm_domain && pd_count) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
+static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ goto unroll_pd_votes;
+ }
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int qcom_wpss_shutdown(struct qcom_adsp *adsp)
+{
+ unsigned int val;
+
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
+
+ /* Wait for halt ACK from QDSP6 */
+ regmap_read_poll_timeout(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, val,
+ val, 1000, ACK_TIMEOUT_US);
+
+ /* Assert the WPSS PDC Reset */
+ reset_control_assert(adsp->pdc_sync_reset);
+
+ /* Place the WPSS processor into reset */
+ reset_control_assert(adsp->restart);
+
+ /* wait after asserting subsystem restart from AOSS */
+ usleep_range(200, 205);
+
+ /* Remove the WPSS reset */
+ reset_control_deassert(adsp->restart);
+
+ /* De-assert the WPSS PDC Reset */
+ reset_control_deassert(adsp->pdc_sync_reset);
+
+ usleep_range(100, 105);
+
+ clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
+
+ /* Wait for halt ACK from QDSP6 */
+ regmap_read_poll_timeout(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, val,
+ !val, 1000, ACK_TIMEOUT_US);
+
+ return 0;
+}
+
static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
{
unsigned long timeout;
@@ -193,12 +340,10 @@ static int adsp_start(struct rproc *rproc)
if (ret)
goto disable_irqs;
- dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
- ret = pm_runtime_get_sync(adsp->dev);
- if (ret) {
- pm_runtime_put_noidle(adsp->dev);
+ ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds,
+ adsp->proxy_pd_count);
+ if (ret < 0)
goto disable_xo_clk;
- }
ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
if (ret) {
@@ -243,8 +388,7 @@ static int adsp_start(struct rproc *rproc)
disable_adsp_clks:
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
disable_power_domain:
- dev_pm_genpd_set_performance_state(adsp->dev, 0);
- pm_runtime_put(adsp->dev);
+ qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
disable_irqs:
@@ -258,8 +402,7 @@ static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
clk_disable_unprepare(adsp->xo);
- dev_pm_genpd_set_performance_state(adsp->dev, 0);
- pm_runtime_put(adsp->dev);
+ qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
}
static int adsp_stop(struct rproc *rproc)
@@ -272,7 +415,7 @@ static int adsp_stop(struct rproc *rproc)
if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
- ret = qcom_adsp_shutdown(adsp);
+ ret = adsp->shutdown(adsp);
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
@@ -408,6 +551,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
@@ -427,6 +571,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
static int adsp_probe(struct platform_device *pdev)
{
const struct adsp_pil_data *desc;
+ const char *firmware_name;
struct qcom_adsp *adsp;
struct rproc *rproc;
int ret;
@@ -435,12 +580,22 @@ static int adsp_probe(struct platform_device *pdev)
if (!desc)
return -EINVAL;
+ firmware_name = desc->firmware_name;
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &firmware_name);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read firmware-name\n");
+ return ret;
+ }
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- desc->firmware_name, sizeof(*adsp));
+ firmware_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
+
+ rproc->auto_boot = desc->auto_boot;
rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
adsp = (struct qcom_adsp *)rproc->priv;
@@ -449,6 +604,11 @@ static int adsp_probe(struct platform_device *pdev)
adsp->info_name = desc->sysmon_name;
platform_set_drvdata(pdev, adsp);
+ if (desc->is_wpss)
+ adsp->shutdown = qcom_wpss_shutdown;
+ else
+ adsp->shutdown = qcom_adsp_shutdown;
+
ret = adsp_alloc_memory_region(adsp);
if (ret)
goto free_rproc;
@@ -457,7 +617,13 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- pm_runtime_enable(adsp->dev);
+ ret = qcom_rproc_pds_attach(adsp->dev, adsp,
+ desc->proxy_pd_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
+ goto free_rproc;
+ }
+ adsp->proxy_pd_count = ret;
ret = adsp_init_reset(adsp);
if (ret)
@@ -467,8 +633,8 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto disable_pm;
- ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, NULL,
- qcom_adsp_pil_handover);
+ ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
+ desc->load_state, qcom_adsp_pil_handover);
if (ret)
goto disable_pm;
@@ -489,7 +655,8 @@ static int adsp_probe(struct platform_device *pdev)
return 0;
disable_pm:
- pm_runtime_disable(adsp->dev);
+ qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+
free_rproc:
rproc_free(rproc);
@@ -506,7 +673,7 @@ static int adsp_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
- pm_runtime_disable(adsp->dev);
+ qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
rproc_free(adsp->rproc);
return 0;
@@ -518,11 +685,16 @@ static const struct adsp_pil_data adsp_resource_init = {
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
+ .is_wpss = false,
+ .auto_boot = true,
.clk_ids = (const char*[]) {
"sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL
},
.num_clks = 7,
+ .proxy_pd_names = (const char*[]) {
+ "cx", NULL
+ },
};
static const struct adsp_pil_data cdsp_resource_init = {
@@ -531,15 +703,39 @@ static const struct adsp_pil_data cdsp_resource_init = {
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
+ .is_wpss = false,
+ .auto_boot = true,
.clk_ids = (const char*[]) {
"sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master",
"q6_axim", NULL
},
.num_clks = 7,
+ .proxy_pd_names = (const char*[]) {
+ "cx", NULL
+ },
+};
+
+static const struct adsp_pil_data wpss_resource_init = {
+ .crash_reason_smem = 626,
+ .firmware_name = "wpss.mdt",
+ .ssr_name = "wpss",
+ .sysmon_name = "wpss",
+ .ssctl_id = 0x19,
+ .is_wpss = true,
+ .auto_boot = false,
+ .load_state = "wpss",
+ .clk_ids = (const char*[]) {
+ "ahb_bdg", "ahb", "rscp", NULL
+ },
+ .num_clks = 3,
+ .proxy_pd_names = (const char*[]) {
+ "cx", "mx", NULL
+ },
};
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init },
+ { .compatible = "qcom,sc7280-wpss-pil", .data = &wpss_resource_init },
{ .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
{ },
};
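The proxy power-domain helpers above special-case a device with a single power domain: the genpd core has already attached that domain to the consumer device (dev->pm_domain is set), so plain runtime PM on the device is sufficient and attaching again by name would fail. With several domains, each is attached by name to a virtual device, voted to its maximum performance state and powered through runtime PM. A condensed, hypothetical sketch of the multi-domain case (example_pds_enable() is not a driver function; the "cx"/"mx" names mirror wpss_resource_init above):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

/* Sketch only: attach and power up two named power domains for @dev */
static int example_pds_enable(struct device *dev, struct device *pds[2])
{
	static const char * const names[2] = { "cx", "mx" };
	int ret, i;

	for (i = 0; i < 2; i++) {
		/* returns a virtual device bound to the named domain */
		pds[i] = dev_pm_domain_attach_by_name(dev, names[i]);
		if (IS_ERR_OR_NULL(pds[i])) {
			ret = pds[i] ? PTR_ERR(pds[i]) : -ENODATA;
			goto unroll;
		}

		/* vote for the highest performance state, then power on */
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(pds[i]);
			dev_pm_domain_detach(pds[i], false);
			goto unroll;
		}
	}

	return 0;

unroll:
	while (--i >= 0) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
		dev_pm_domain_detach(pds[i], false);
	}
	return ret;
}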
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index a2c231a17b2b..af217de75e4d 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -218,6 +218,7 @@ struct q6v5 {
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
+ struct platform_device *bam_dmux;
bool need_mem_protection;
bool has_alt_reset;
bool has_mba_logs;
@@ -1807,18 +1808,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
* reserved memory regions from device's memory-region property.
*/
child = of_get_child_by_name(qproc->dev->of_node, "mba");
- if (!child)
+ if (!child) {
node = of_parse_phandle(qproc->dev->of_node,
"memory-region", 0);
- else
+ } else {
node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
+ }
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(qproc->dev, "unable to resolve mba region\n");
return ret;
}
- of_node_put(node);
qproc->mba_phys = r.start;
qproc->mba_size = resource_size(&r);
@@ -1829,14 +1832,15 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
} else {
child = of_get_child_by_name(qproc->dev->of_node, "mpss");
node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
return ret;
}
- of_node_put(node);
qproc->mpss_phys = qproc->mpss_reloc = r.start;
qproc->mpss_size = resource_size(&r);
@@ -1847,6 +1851,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
+ struct device_node *node;
struct q6v5 *qproc;
struct rproc *rproc;
const char *mba_image;
@@ -1990,6 +1995,10 @@ static int q6v5_probe(struct platform_device *pdev)
if (ret)
goto remove_sysmon_subdev;
+ node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
+ qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
+ of_node_put(node);
+
return 0;
remove_sysmon_subdev:
@@ -2011,6 +2020,8 @@ static int q6v5_remove(struct platform_device *pdev)
struct q6v5 *qproc = platform_get_drvdata(pdev);
struct rproc *rproc = qproc->rproc;
+ if (qproc->bam_dmux)
+ of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
rproc_del(rproc);
qcom_q6v5_deinit(&qproc->q6v5);
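Two independent fixes run through the q6v5_alloc_memory_region() and probe/remove hunks above: nodes returned by of_get_child_by_name(), of_parse_phandle() and of_get_compatible_child() carry a reference that must be dropped with of_node_put() on every path, including error paths, and the optional "qcom,bam-dmux" child node is now instantiated as a platform device and destroyed on remove. A hedged sketch of both patterns (the example_* helpers are illustrative only):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Resolve the first memory-region phandle of @dev into @r, balancing refcounts */
static int example_get_mem_region(struct device *dev, struct resource *r)
{
	struct device_node *node;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	ret = of_address_to_resource(node, 0, r);
	of_node_put(node);		/* drop the reference even when ret != 0 */

	return ret;
}

/* Create a platform device for an optional child node, if it exists */
static struct platform_device *example_spawn_child(struct device *parent)
{
	struct device_node *node;
	struct platform_device *child;

	node = of_get_compatible_child(parent->of_node, "qcom,bam-dmux");
	child = of_platform_device_create(node, NULL, parent);
	of_node_put(node);

	return child;		/* NULL if the node is absent; tear down with
				 * of_platform_device_destroy(&child->dev, NULL) */
}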
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 80bbafee9846..9a223d394087 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -500,6 +500,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
index 4ad98b0b8caa..906ff3c4dfdd 100644
--- a/drivers/remoteproc/remoteproc_cdev.c
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -42,7 +42,7 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_
rproc->state != RPROC_ATTACHED)
return -EINVAL;
- rproc_shutdown(rproc);
+ ret = rproc_shutdown(rproc);
} else if (!strncmp(cmd, "detach", len)) {
if (rproc->state != RPROC_ATTACHED)
return -EINVAL;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 69f51acf235e..c510125769b9 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -2061,16 +2061,18 @@ EXPORT_SYMBOL(rproc_boot);
* which means that the @rproc handle stays valid even after rproc_shutdown()
* returns, and users can still use it with a subsequent rproc_boot(), if
* needed.
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
*/
-void rproc_shutdown(struct rproc *rproc)
+int rproc_shutdown(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
- int ret;
+ int ret = 0;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
- return;
+ return ret;
}
/* if the remote proc is still needed, bail out */
@@ -2097,6 +2099,7 @@ void rproc_shutdown(struct rproc *rproc)
rproc->table_ptr = NULL;
out:
mutex_unlock(&rproc->lock);
+ return ret;
}
EXPORT_SYMBOL(rproc_shutdown);
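With this change rproc_shutdown() reports failures (for instance an interrupted wait on rproc->lock) instead of returning void, and the cdev and sysfs "stop" handlers elsewhere in this series propagate that value to user space. Caller-side, the pattern is simply to check the return value; a small hypothetical helper:

#include <linux/remoteproc.h>

/* Hypothetical helper: restart a remote processor, propagating shutdown errors */
static int example_rproc_restart(struct rproc *rproc)
{
	int ret;

	ret = rproc_shutdown(rproc);	/* 0 on success, negative errno otherwise */
	if (ret)
		return ret;

	return rproc_boot(rproc);
}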
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index b5a1e3b697d9..581930483ef8 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -76,7 +76,7 @@ static ssize_t rproc_coredump_write(struct file *filp,
int ret, err = 0;
char buf[20];
- if (count > sizeof(buf))
+ if (count < 1 || count > sizeof(buf))
return -EINVAL;
ret = copy_from_user(buf, user_buf, count);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index a328e634b1de..72d4d3d7d94d 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -84,7 +84,6 @@ static inline void rproc_char_device_remove(struct rproc *rproc)
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
-void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index ea8b89f97d7b..51a04bc6ba7a 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -206,7 +206,7 @@ static ssize_t state_store(struct device *dev,
rproc->state != RPROC_ATTACHED)
return -EINVAL;
- rproc_shutdown(rproc);
+ ret = rproc_shutdown(rproc);
} else if (sysfs_streq(buf, "detach")) {
if (rproc->state != RPROC_ATTACHED)
return -EINVAL;
@@ -230,6 +230,22 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(name);
+static umode_t rproc_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct rproc *rproc = to_rproc(dev);
+ umode_t mode = attr->mode;
+
+ if (rproc->sysfs_read_only && (attr == &dev_attr_recovery.attr ||
+ attr == &dev_attr_firmware.attr ||
+ attr == &dev_attr_state.attr ||
+ attr == &dev_attr_coredump.attr))
+ mode = 0444;
+
+ return mode;
+}
+
static struct attribute *rproc_attrs[] = {
&dev_attr_coredump.attr,
&dev_attr_recovery.attr,
@@ -240,7 +256,8 @@ static struct attribute *rproc_attrs[] = {
};
static const struct attribute_group rproc_devgroup = {
- .attrs = rproc_attrs
+ .attrs = rproc_attrs,
+ .is_visible = rproc_is_visible,
};
static const struct attribute_group *rproc_devgroups[] = {
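rproc_is_visible() uses the attribute group's .is_visible hook, which is evaluated per attribute when the sysfs group is created and may return an adjusted mode (0444 here, demoting the files to read-only when a driver sets rproc->sysfs_read_only, as wkup_m3_rproc does at the end of this series) or 0 to hide the attribute entirely. A generic, self-contained sketch of the mechanism with illustrative names (example_data and the "mode" attribute are not part of the remoteproc core):

#include <linux/device.h>
#include <linux/sysfs.h>

struct example_data { bool read_only; };	/* illustrative only */

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "normal\n");
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	return count;
}
static DEVICE_ATTR_RW(mode);

static umode_t example_is_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct example_data *d = dev_get_drvdata(kobj_to_dev(kobj));

	/* demote the attribute to read-only rather than hiding it (return 0) */
	if (d->read_only && attr == &dev_attr_mode.attr)
		return 0444;

	return attr->mode;
}

static struct attribute *example_attrs[] = {
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_is_visible,
};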
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 939c5d90b562..eb9c64f7b9b4 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -2,7 +2,7 @@
/*
* TI K3 DSP Remote Processor(s) driver
*
- * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
* Suman Anna <s-anna@ti.com>
*/
@@ -216,6 +216,43 @@ lreset:
return ret;
}
+static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_dsp_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor, this is only for sanity-sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
/*
* The C66x DSP cores have a local reset that affects only the CPU, and a
* generic module reset that powers on the device and allows the DSP internal
@@ -223,7 +260,8 @@ lreset:
* used to release the global reset on C66x DSPs to allow loading into the DSP
* internal RAMs. The .prepare() ops is invoked by remoteproc core before any
* firmware loading, and is followed by the .start() ops after loading to
- * actually let the C66x DSP cores run.
+ * actually let the C66x DSP cores run. This callback is invoked only in
+ * remoteproc mode.
*/
static int k3_dsp_rproc_prepare(struct rproc *rproc)
{
@@ -247,7 +285,7 @@ static int k3_dsp_rproc_prepare(struct rproc *rproc)
* powering down the C66x DSP cores. The cores themselves are only halted in the
* .stop() callback through the local reset, and the .unprepare() ops is invoked
* by the remoteproc core after the remoteproc is stopped to balance the global
- * reset.
+ * reset. This callback is invoked only in remoteproc mode.
*/
static int k3_dsp_rproc_unprepare(struct rproc *rproc)
{
@@ -268,42 +306,18 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
*
* This function will be invoked only after the firmware for this rproc
* was loaded, parsed successfully, and all of its resource requirements
- * were met.
+ * were met. This callback is invoked only in remoteproc mode.
*/
static int k3_dsp_rproc_start(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
struct device *dev = kproc->dev;
u32 boot_addr;
int ret;
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_dsp_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
return ret;
- }
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- goto put_mbox;
- }
boot_addr = rproc->bootaddr;
if (boot_addr & (kproc->data->boot_align_addr - 1)) {
@@ -333,7 +347,7 @@ put_mbox:
* Stop the DSP remote processor.
*
* This function puts the DSP processor into reset, and finishes processing
- * of any pending messages.
+ * of any pending messages. This callback is invoked only in remoteproc mode.
*/
static int k3_dsp_rproc_stop(struct rproc *rproc)
{
@@ -347,6 +361,78 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
}
/*
+ * Attach to a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the DSP core. This callback is invoked only in IPC-only
+ * mode.
+ */
+static int k3_dsp_rproc_attach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "DSP initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the DSP core is not stopped and will
+ * be left to continue to run its booted firmware. This callback is invoked only
+ * in IPC-only mode.
+ */
+static int k3_dsp_rproc_detach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ mbox_free_channel(kproc->mbox);
+ dev_info(dev, "DSP deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
+/*
* Custom function to translate a DSP device address (internal RAMs only) to a
* kernel virtual address. The DSPs can access their RAMs at either an internal
* address visible only from a DSP, or at the SoC-level bus address. Both these
@@ -592,6 +678,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
struct k3_dsp_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
+ bool p_state = false;
int ret = 0;
int ret1;
@@ -670,19 +757,43 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
goto release_tsp;
}
- /*
- * ensure the DSP local reset is asserted to ensure the DSP doesn't
- * execute bogus code in .prepare() when the module reset is released.
- */
- if (data->uses_lreset) {
- ret = reset_control_status(kproc->reset);
- if (ret < 0) {
- dev_err(dev, "failed to get reset status, status = %d\n",
- ret);
- goto release_mem;
- } else if (ret == 0) {
- dev_warn(dev, "local reset is deasserted for device\n");
- k3_dsp_rproc_reset(kproc);
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ NULL, &p_state);
+ if (ret) {
+ dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ /* configure J721E devices for either remoteproc or IPC-only mode */
+ if (p_state) {
+ dev_info(dev, "configured DSP for IPC-only mode\n");
+ rproc->state = RPROC_DETACHED;
+ /* override rproc ops with only required IPC-only mode ops */
+ rproc->ops->prepare = NULL;
+ rproc->ops->unprepare = NULL;
+ rproc->ops->start = NULL;
+ rproc->ops->stop = NULL;
+ rproc->ops->attach = k3_dsp_rproc_attach;
+ rproc->ops->detach = k3_dsp_rproc_detach;
+ rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
+ } else {
+ dev_info(dev, "configured DSP for remoteproc mode\n");
+ /*
+ * ensure the DSP local reset is asserted to ensure the DSP
+ * doesn't execute bogus code in .prepare() when the module
+ * reset is released.
+ */
+ if (data->uses_lreset) {
+ ret = reset_control_status(kproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n",
+ ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ dev_warn(dev, "local reset is deasserted for device\n");
+ k3_dsp_rproc_reset(kproc);
+ }
}
}
@@ -717,9 +828,18 @@ free_rproc:
static int k3_dsp_rproc_remove(struct platform_device *pdev)
{
struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct rproc *rproc = kproc->rproc;
struct device *dev = &pdev->dev;
int ret;
+ if (rproc->state == RPROC_ATTACHED) {
+ ret = rproc_detach(rproc);
+ if (ret) {
+ dev_err(dev, "failed to detach proc, ret = %d\n", ret);
+ return ret;
+ }
+ }
+
rproc_del(kproc->rproc);
ret = ti_sci_proc_release(kproc->tsp);
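The k3_dsp_rproc_probe() hunk above decides between the two modes by asking System Firmware whether the DSP power domain is already on: if it is, the rproc starts out as RPROC_DETACHED and the boot-related ops are cleared so the remoteproc core only ever walks the attach/detach path, with the resource table fetched from the first reserved-memory region. The same decision, condensed into a hypothetical helper for clarity (example_configure_ipc_only() is not a function in the driver):

#include <linux/remoteproc.h>

static void example_configure_ipc_only(struct rproc *rproc, bool powered_on)
{
	if (!powered_on)
		return;			/* remoteproc mode: keep prepare/start/stop */

	rproc->state = RPROC_DETACHED;	/* core will attach instead of booting */
	rproc->ops->prepare = NULL;
	rproc->ops->unprepare = NULL;
	rproc->ops->start = NULL;
	rproc->ops->stop = NULL;
	rproc->ops->attach = k3_dsp_rproc_attach;
	rproc->ops->detach = k3_dsp_rproc_detach;
	rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
}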
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 969531c05b13..4840ad906018 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -2,7 +2,7 @@
/*
* TI K3 R5F (MCU) Remote Processor driver
*
- * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
* Suman Anna <s-anna@ti.com>
*/
@@ -376,6 +376,44 @@ static inline int k3_r5_core_run(struct k3_r5_core *core)
0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
+static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_r5_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor, this is only for sanity-sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* The R5F cores have controls for both a reset and a halt/run. The code
* execution from DDR requires the initial boot-strapping code to be run
@@ -390,6 +428,7 @@ static inline int k3_r5_core_run(struct k3_r5_core *core)
* private to each core. Only Core0 needs to be unhalted for running the
* cluster in this mode. The function uses the same reset logic as LockStep
* mode for this (though the behavior is agnostic of the reset release order).
+ * This callback is invoked only in remoteproc mode.
*/
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
@@ -455,7 +494,8 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
* both cores. The access is made possible only with releasing the resets for
* both cores, but with only Core0 unhalted. This function re-uses the same
* reset assert logic as LockStep mode for this mode (though the behavior is
- * agnostic of the reset assert order).
+ * agnostic of the reset assert order). This callback is invoked only in
+ * remoteproc mode.
*/
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
@@ -489,44 +529,21 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
*
* The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
* code, so only Core0 needs to be unhalted. The function uses the same logic
- * flow as Split-mode for this.
+ * flow as Split-mode for this. This callback is invoked only in remoteproc
+ * mode.
*/
static int k3_r5_rproc_start(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
- struct mbox_client *client = &kproc->client;
struct device *dev = kproc->dev;
struct k3_r5_core *core;
u32 boot_addr;
int ret;
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_r5_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
return ret;
- }
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- goto put_mbox;
- }
boot_addr = rproc->bootaddr;
/* TODO: add boot_addr sanity checking */
@@ -584,7 +601,8 @@ put_mbox:
* be done here, but is preferred to be done in the .unprepare() ops - this
* maintains the symmetric behavior between the .start(), .stop(), .prepare()
* and .unprepare() ops, and also balances them well between sysfs 'state'
- * flow and device bind/unbind or module removal.
+ * flow and device bind/unbind or module removal. This callback is invoked
+ * only in remoteproc mode.
*/
static int k3_r5_rproc_stop(struct rproc *rproc)
{
@@ -622,6 +640,78 @@ out:
}
/*
+ * Attach to a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the R5F cores in IPC-only mode. This callback is invoked
+ * only in IPC-only mode.
+ */
+static int k3_r5_rproc_attach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "R5F core initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the R5F cores are not stopped and
+ * will be left in booted state in IPC-only mode. This callback is invoked
+ * only in IPC-only mode.
+ */
+static int k3_r5_rproc_detach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ mbox_free_channel(kproc->mbox);
+ dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
+/*
* Internal Memory translation helper
*
* Custom function implementing the rproc .da_to_va ops to provide address
@@ -1000,6 +1090,116 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
}
}
+/*
+ * This function checks and configures a R5F core for IPC-only or remoteproc
+ * mode. The driver is configured to be in IPC-only mode for a R5F core when
+ * the core has been loaded and started by a bootloader. The IPC-only mode is
+ * detected by querying the System Firmware for reset, power on and halt status
+ * and ensuring that the core is running. Any incomplete steps at bootloader
+ * are validated and errored out.
+ *
+ * In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings
+ * and cluster mode parsed originally from kernel DT are updated to reflect the
+ * actual values configured by bootloader. The driver internal device memory
+ * addresses for TCMs are also updated.
+ */
+static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ bool r_state = false, c_state = false;
+ u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
+ u64 boot_vec = 0;
+ u32 atcm_enable, btcm_enable, loczrama;
+ struct k3_r5_core *core0;
+ enum cluster_mode mode;
+ int ret;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+
+ ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
+ &r_state, &c_state);
+ if (ret) {
+ dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ return ret;
+ }
+ if (r_state != c_state) {
+ dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
+ r_state, c_state);
+ }
+
+ ret = reset_control_status(core->reset);
+ if (ret < 0) {
+ dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0) {
+ dev_err(cdev, "failed to get initial processor status, ret = %d\n",
+ ret);
+ return ret;
+ }
+ atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ? 1 : 0;
+ btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ? 1 : 0;
+ loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ? 1 : 0;
+ if (cluster->soc_data->single_cpu_mode) {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ?
+ CLUSTER_MODE_SINGLECPU : CLUSTER_MODE_SPLIT;
+ } else {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ?
+ CLUSTER_MODE_LOCKSTEP : CLUSTER_MODE_SPLIT;
+ }
+ halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT;
+
+ /*
+ * IPC-only mode detection requires both local and module resets to
+ * be deasserted and R5F core to be unhalted. Local reset status is
+ * irrelevant if module reset is asserted (POR value has local reset
+ * deasserted), and is deemed as remoteproc mode
+ */
+ if (c_state && !ret && !halted) {
+ dev_info(cdev, "configured R5F for IPC-only mode\n");
+ kproc->rproc->state = RPROC_DETACHED;
+ ret = 1;
+ /* override rproc ops with only required IPC-only mode ops */
+ kproc->rproc->ops->prepare = NULL;
+ kproc->rproc->ops->unprepare = NULL;
+ kproc->rproc->ops->start = NULL;
+ kproc->rproc->ops->stop = NULL;
+ kproc->rproc->ops->attach = k3_r5_rproc_attach;
+ kproc->rproc->ops->detach = k3_r5_rproc_detach;
+ kproc->rproc->ops->get_loaded_rsc_table =
+ k3_r5_get_loaded_rsc_table;
+ } else if (!c_state) {
+ dev_info(cdev, "configured R5F for remoteproc mode\n");
+ ret = 0;
+ } else {
+ dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
+ !ret ? "deasserted" : "asserted",
+ c_state ? "deasserted" : "asserted",
+ halted ? "halted" : "unhalted");
+ ret = -EINVAL;
+ }
+
+ /* fixup TCMs, cluster & core flags to actual values in IPC-only mode */
+ if (ret > 0) {
+ if (core == core0)
+ cluster->mode = mode;
+ core->atcm_enable = atcm_enable;
+ core->btcm_enable = btcm_enable;
+ core->loczrama = loczrama;
+ core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+ core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+ }
+
+ return ret;
+}
+
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
@@ -1009,7 +1209,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
struct device *cdev;
const char *fw_name;
struct rproc *rproc;
- int ret;
+ int ret, ret1;
core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry(core, &cluster->cores, elem) {
@@ -1040,6 +1240,12 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
kproc->rproc = rproc;
core->rproc = rproc;
+ ret = k3_r5_rproc_configure_mode(kproc);
+ if (ret < 0)
+ goto err_config;
+ if (ret)
+ goto init_rmem;
+
ret = k3_r5_rproc_configure(kproc);
if (ret) {
dev_err(dev, "initial configure failed, ret = %d\n",
@@ -1047,6 +1253,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto err_config;
}
+init_rmem:
k3_r5_adjust_tcm_sizes(kproc);
ret = k3_r5_reserved_mem_init(kproc);
@@ -1071,6 +1278,15 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
return 0;
err_split:
+ if (rproc->state == RPROC_ATTACHED) {
+ ret1 = rproc_detach(rproc);
+ if (ret1) {
+ dev_err(kproc->dev, "failed to detach rproc, ret = %d\n",
+ ret1);
+ return ret1;
+ }
+ }
+
rproc_del(rproc);
err_add:
k3_r5_reserved_mem_exit(kproc);
@@ -1094,6 +1310,7 @@ static void k3_r5_cluster_rproc_exit(void *data)
struct k3_r5_rproc *kproc;
struct k3_r5_core *core;
struct rproc *rproc;
+ int ret;
/*
* lockstep mode and single-cpu modes have only one rproc associated
@@ -1109,6 +1326,14 @@ static void k3_r5_cluster_rproc_exit(void *data)
rproc = core->rproc;
kproc = rproc->priv;
+ if (rproc->state == RPROC_ATTACHED) {
+ ret = rproc_detach(rproc);
+ if (ret) {
+ dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret);
+ return;
+ }
+ }
+
rproc_del(rproc);
k3_r5_reserved_mem_exit(kproc);
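k3_r5_rproc_configure_mode() uses a tri-state return: a negative errno aborts initialization, 0 selects remoteproc mode (the driver must still run k3_r5_rproc_configure() before loading firmware), and 1 selects IPC-only mode, where configuration is owned by the bootloader and k3_r5_cluster_rproc_init() jumps straight to the reserved-memory setup at the init_rmem label. The same branch written out as plain if/else for clarity (a restatement of the flow above, not new driver code):

	/* tri-state: < 0 error, 0 remoteproc mode, 1 IPC-only mode */
	ret = k3_r5_rproc_configure_mode(kproc);
	if (ret < 0)
		goto err_config;

	if (ret == 0) {
		/* remoteproc mode: the driver still owns core configuration */
		ret = k3_r5_rproc_configure(kproc);
		if (ret) {
			dev_err(dev, "initial configure failed, ret = %d\n", ret);
			goto err_config;
		}
	}

	/* both modes continue with TCM sizing and reserved-memory setup */
	k3_r5_adjust_tcm_sizes(kproc);
	ret = k3_r5_reserved_mem_init(kproc);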
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 484f7605823e..a0c204cb0979 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -163,6 +163,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
}
rproc->auto_boot = false;
+ rproc->sysfs_read_only = true;
wkupm3 = rproc->priv;
wkupm3->rproc = rproc;