author     Linus Torvalds <torvalds@linux-foundation.org>  2023-06-30 01:22:19 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-06-30 01:22:19 +0300
commit     e4c8d01865118ab148f77bdb54ec9c0c181d90a3 (patch)
tree       07718f0f27beb7fda54163ecc9fb46638b4187a5 /drivers/firmware
parent     a9025a5f16ed610ea28a188cb0946cb71fafff17 (diff)
parent     356fa4975950d48d12b6ee9f9050ad429db25852 (diff)
Merge tag 'soc-drivers-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "Nothing surprising in the SoC specific drivers, with the usual
  updates:

   - Added or improved SoC driver support for Tegra234, Exynos4212,
     RK3588, as well as multiple Mediatek and Qualcomm chips

   - SCMI firmware gains support for multiple SMC/HVC transport and
     version 3.2 of the protocol

   - Cleanups and minor changes for the reset controller, memory
     controller, firmware and sram drivers

   - Minor changes to amd/xilinx, samsung, tegra, nxp, ti, qualcomm,
     amlogic and renesas SoC specific drivers"

* tag 'soc-drivers-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (118 commits)
  dt-bindings: interrupt-controller: Convert Amlogic Meson GPIO interrupt controller binding
  MAINTAINERS: add PHY-related files to Amlogic SoC file list
  drivers: meson: secure-pwrc: always enable DMA domain
  tee: optee: Use kmemdup() to replace kmalloc + memcpy
  soc: qcom: geni-se: Do not bother about enable/disable of interrupts in secondary sequencer
  dt-bindings: sram: qcom,imem: document qdu1000
  soc: qcom: icc-bwmon: Fix MSM8998 count unit
  dt-bindings: soc: qcom,rpmh-rsc: Require power-domains
  soc: qcom: socinfo: Add Soc ID for IPQ5300
  dt-bindings: arm: qcom,ids: add SoC ID for IPQ5300
  soc: qcom: Fix a IS_ERR() vs NULL bug in probe
  soc: qcom: socinfo: Add support for new fields in revision 19
  soc: qcom: socinfo: Add support for new fields in revision 18
  dt-bindings: firmware: scm: Add compatible for SDX75
  soc: qcom: mdt_loader: Fix split image detection
  dt-bindings: memory-controllers: drop unneeded quotes
  soc: rockchip: dtpm: use C99 array init syntax
  firmware: tegra: bpmp: Add support for DRAM MRQ GSCs
  soc/tegra: pmc: Use devm_clk_notifier_register()
  soc/tegra: pmc: Simplify debugfs initialization
  ...
Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/arm_scmi/driver.c          1
-rw-r--r--  drivers/firmware/arm_scmi/powercap.c      173
-rw-r--r--  drivers/firmware/arm_scmi/smc.c            30
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra186.c    204
-rw-r--r--  drivers/firmware/tegra/bpmp.c               4
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c           12
6 files changed, 335 insertions, 89 deletions
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index e7d97b59963b..b5957cc12fee 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -2914,6 +2914,7 @@ static const struct of_device_id scmi_of_match[] = {
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+ { .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index 83b90bde755c..244929cb4f3e 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -108,6 +108,8 @@ struct scmi_powercap_meas_changed_notify_payld {
};
struct scmi_powercap_state {
+ bool enabled;
+ u32 last_pcap;
bool meas_notif_enabled;
u64 thresholds;
#define THRESH_LOW(p, id) \
@@ -313,24 +315,33 @@ static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph,
return ret;
}
-static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
- u32 domain_id, u32 *power_cap)
+static int __scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
+ const struct scmi_powercap_info *dom,
+ u32 *power_cap)
{
- struct scmi_powercap_info *dom;
- struct powercap_info *pi = ph->get_priv(ph);
-
- if (!power_cap || domain_id >= pi->num_domains)
- return -EINVAL;
-
- dom = pi->powercaps + domain_id;
if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) {
*power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr);
trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET,
- domain_id, *power_cap, 0);
+ dom->id, *power_cap, 0);
return 0;
}
- return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap);
+ return scmi_powercap_xfer_cap_get(ph, dom->id, power_cap);
+}
+
+static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_cap)
+{
+ const struct scmi_powercap_info *dom;
+
+ if (!power_cap)
+ return -EINVAL;
+
+ dom = scmi_powercap_dom_info_get(ph, domain_id);
+ if (!dom)
+ return -EINVAL;
+
+ return __scmi_powercap_cap_get(ph, dom, power_cap);
}
static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
@@ -375,17 +386,20 @@ static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
return ret;
}
-static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
- u32 domain_id, u32 power_cap,
- bool ignore_dresp)
+static int __scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
+ struct powercap_info *pi, u32 domain_id,
+ u32 power_cap, bool ignore_dresp)
{
+ int ret = -EINVAL;
const struct scmi_powercap_info *pc;
pc = scmi_powercap_dom_info_get(ph, domain_id);
- if (!pc || !pc->powercap_cap_config || !power_cap ||
- power_cap < pc->min_power_cap ||
- power_cap > pc->max_power_cap)
- return -EINVAL;
+ if (!pc || !pc->powercap_cap_config)
+ return ret;
+
+ if (power_cap &&
+ (power_cap < pc->min_power_cap || power_cap > pc->max_power_cap))
+ return ret;
if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) {
struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP];
@@ -394,10 +408,41 @@ static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
ph->hops->fastchannel_db_ring(fci->set_db);
trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET,
domain_id, power_cap, 0);
+ ret = 0;
+ } else {
+ ret = scmi_powercap_xfer_cap_set(ph, pc, power_cap,
+ ignore_dresp);
+ }
+
+ /* Save the last explicitly set non-zero powercap value */
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x2 && !ret && power_cap)
+ pi->states[domain_id].last_pcap = power_cap;
+
+ return ret;
+}
+
+static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_cap,
+ bool ignore_dresp)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ /*
+ * Disallow zero as a possible explicitly requested powercap:
+ * there are enable/disable operations for this.
+ */
+ if (!power_cap)
+ return -EINVAL;
+
+ /* Just log the last set request if acting on a disabled domain */
+ if (PROTOCOL_REV_MAJOR(pi->version) >= 0x2 &&
+ !pi->states[domain_id].enabled) {
+ pi->states[domain_id].last_pcap = power_cap;
return 0;
}
- return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp);
+ return __scmi_powercap_cap_set(ph, pi, domain_id,
+ power_cap, ignore_dresp);
}
static int scmi_powercap_xfer_pai_get(const struct scmi_protocol_handle *ph,
@@ -564,11 +609,78 @@ scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph,
return ret;
}
+static int scmi_powercap_cap_enable_set(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable)
+{
+ int ret;
+ u32 power_cap;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (PROTOCOL_REV_MAJOR(pi->version) < 0x2)
+ return -EINVAL;
+
+ if (enable == pi->states[domain_id].enabled)
+ return 0;
+
+ if (enable) {
+ /* Cannot enable with a zero powercap. */
+ if (!pi->states[domain_id].last_pcap)
+ return -EINVAL;
+
+ ret = __scmi_powercap_cap_set(ph, pi, domain_id,
+ pi->states[domain_id].last_pcap,
+ true);
+ } else {
+ ret = __scmi_powercap_cap_set(ph, pi, domain_id, 0, true);
+ }
+
+ if (ret)
+ return ret;
+
+ /*
+ * Update our internal state to reflect final platform state: the SCMI
+ * server could have ignored a disable request and kept enforcing some
+ * powercap limit requested by other agents.
+ */
+ ret = scmi_powercap_cap_get(ph, domain_id, &power_cap);
+ if (!ret)
+ pi->states[domain_id].enabled = !!power_cap;
+
+ return ret;
+}
+
+static int scmi_powercap_cap_enable_get(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool *enable)
+{
+ int ret;
+ u32 power_cap;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ *enable = true;
+ if (PROTOCOL_REV_MAJOR(pi->version) < 0x2)
+ return 0;
+
+ /*
+ * Report always real platform state; platform could have ignored
+ * a previous disable request. Default true on any error.
+ */
+ ret = scmi_powercap_cap_get(ph, domain_id, &power_cap);
+ if (!ret)
+ *enable = !!power_cap;
+
+ /* Update internal state with current real platform state */
+ pi->states[domain_id].enabled = *enable;
+
+ return 0;
+}
+
static const struct scmi_powercap_proto_ops powercap_proto_ops = {
.num_domains_get = scmi_powercap_num_domains_get,
.info_get = scmi_powercap_dom_info_get,
.cap_get = scmi_powercap_cap_get,
.cap_set = scmi_powercap_cap_set,
+ .cap_enable_set = scmi_powercap_cap_enable_set,
+ .cap_enable_get = scmi_powercap_cap_enable_get,
.pai_get = scmi_powercap_pai_get,
.pai_set = scmi_powercap_pai_set,
.measurements_get = scmi_powercap_measurements_get,
@@ -829,6 +941,11 @@ scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo->powercaps)
return -ENOMEM;
+ pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->states), GFP_KERNEL);
+ if (!pinfo->states)
+ return -ENOMEM;
+
/*
* Note that any failure in retrieving any domain attribute leads to
* the whole Powercap protocol initialization failure: this way the
@@ -843,15 +960,21 @@ scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
if (pinfo->powercaps[domain].fastchannels)
scmi_powercap_domain_init_fc(ph, domain,
&pinfo->powercaps[domain].fc_info);
- }
- pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
- sizeof(*pinfo->states), GFP_KERNEL);
- if (!pinfo->states)
- return -ENOMEM;
+ /* Grab initial state when disable is supported. */
+ if (PROTOCOL_REV_MAJOR(version) >= 0x2) {
+ ret = __scmi_powercap_cap_get(ph,
+ &pinfo->powercaps[domain],
+ &pinfo->states[domain].last_pcap);
+ if (ret)
+ return ret;
- pinfo->version = version;
+ pinfo->states[domain].enabled =
+ !!pinfo->states[domain].last_pcap;
+ }
+ }
+ pinfo->version = version;
return ph->set_priv(ph, pinfo);
}
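
Note: the powercap changes above give each domain an enabled flag and a cached last_pcap. Setting a zero cap through cap_set() is now rejected, disabling a domain is done by writing a zero cap via the new cap_enable_set(), and enabling replays the last non-zero request, after which the driver re-reads the platform because the SCMI server may keep enforcing a cap requested by another agent. The standalone C sketch below models that bookkeeping in userspace; the struct, the function names and the single platform_cap variable are illustrative stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pcap_model {
	bool enabled;		/* agent's view of the domain state */
	uint32_t last_pcap;	/* last explicitly requested non-zero cap */
	uint32_t platform_cap;	/* cap the (modelled) platform enforces */
};

/* Zero caps are rejected; requests on a disabled domain are only recorded. */
static int cap_set(struct pcap_model *m, uint32_t cap)
{
	if (!cap)
		return -1;

	m->last_pcap = cap;
	if (m->enabled)
		m->platform_cap = cap;

	return 0;
}

/*
 * Disable by writing a zero cap, enable by replaying last_pcap, then derive
 * the enabled flag from what the platform actually enforces (a real SCMI
 * server may refuse to drop a cap requested by another agent).
 */
static void cap_enable(struct pcap_model *m, bool enable)
{
	m->platform_cap = enable ? m->last_pcap : 0;
	m->enabled = m->platform_cap != 0;
}

int main(void)
{
	struct pcap_model m = {
		.enabled = true, .last_pcap = 5000, .platform_cap = 5000,
	};

	cap_enable(&m, false);	/* cap lifted */
	cap_set(&m, 3000);	/* remembered only, domain is disabled */
	cap_enable(&m, true);	/* replays the cached 3000 */

	printf("enabled=%d cap=%u\n", m.enabled, (unsigned int)m.platform_cap);
	return 0;
}
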
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 93272e4bbd12..621c37efe3ec 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -20,6 +20,23 @@
#include "common.h"
+/*
+ * The shmem address is split into 4K page and offset.
+ * This is to make sure the parameters fit in 32bit arguments of the
+ * smc/hvc call to keep it uniform across smc32/smc64 conventions.
+ * This however limits the shmem address to 44 bit.
+ *
+ * These optional parameters can be used to distinguish among multiple
+ * scmi instances that are using the same smc-id.
+ * The page parameter is passed in r1/x1/w1 register and the offset parameter
+ * is passed in r2/x2/w2 register.
+ */
+
+#define SHMEM_SIZE (SZ_4K)
+#define SHMEM_SHIFT 12
+#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))
+#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))
+
/**
* struct scmi_smc - Structure representing a SCMI smc transport
*
@@ -30,6 +47,8 @@
* @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
* Used when operating in atomic mode.
* @func_id: smc/hvc call function id
+ * @param_page: 4K page number of the shmem channel
+ * @param_offset: Offset within the 4K page of the shmem channel
*/
struct scmi_smc {
@@ -40,6 +59,8 @@ struct scmi_smc {
#define INFLIGHT_NONE MSG_TOKEN_MAX
atomic_t inflight;
u32 func_id;
+ u32 param_page;
+ u32 param_offset;
};
static irqreturn_t smc_msg_done_isr(int irq, void *data)
@@ -137,6 +158,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (ret < 0)
return ret;
+ if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
+ scmi_info->param_page = SHMEM_PAGE(res.start);
+ scmi_info->param_offset = SHMEM_OFFSET(res.start);
+ }
/*
* If there is an interrupt named "a2p", then the service and
* completion of a message is signaled by an interrupt rather than by
@@ -179,6 +204,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
{
struct scmi_smc *scmi_info = cinfo->transport_info;
struct arm_smccc_res res;
+ unsigned long page = scmi_info->param_page;
+ unsigned long offset = scmi_info->param_offset;
/*
* Channel will be released only once response has been
@@ -188,7 +215,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
- arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ arm_smccc_1_1_invoke(scmi_info->func_id, page, offset, 0, 0, 0, 0, 0,
+ &res);
/* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
if (res.a0) {
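
Note: the smc.c hunks above implement the new "arm,scmi-smc-param" compatible: the shmem channel address is split into a 4 KiB page number and a byte offset, passed in the first two SMC/HVC arguments so that several SCMI instances can share one SMC function ID while staying within 32-bit arguments (which limits the shmem address to 44 bits). The small userspace program below mirrors that split with equivalent macros; the sample address is hypothetical.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* SZ_4K and _UL from the kernel are replaced by plain C equivalents here. */
#define SHMEM_SHIFT	12
#define SHMEM_SIZE	(1UL << SHMEM_SHIFT)
#define SHMEM_PAGE(x)	((unsigned long)((x) >> SHMEM_SHIFT))
#define SHMEM_OFFSET(x)	((unsigned long)((x) & (SHMEM_SIZE - 1)))

int main(void)
{
	uint64_t shmem = 0x8fe042000ULL;	/* hypothetical shmem base */

	/* With a 32-bit page argument, addresses up to 44 bits are usable. */
	printf("shmem 0x%" PRIx64 " -> page 0x%lx, offset 0x%lx\n",
	       shmem, SHMEM_PAGE(shmem), SHMEM_OFFSET(shmem));
	return 0;
}
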
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index 2e26199041cd..6f0d0511b486 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -4,7 +4,9 @@
*/
#include <linux/genalloc.h>
+#include <linux/io.h>
#include <linux/mailbox_client.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <soc/tegra/bpmp.h>
@@ -18,7 +20,10 @@ struct tegra186_bpmp {
struct {
struct gen_pool *pool;
- void __iomem *virt;
+ union {
+ void __iomem *sram;
+ void *dram;
+ };
dma_addr_t phys;
} tx, rx;
@@ -118,8 +123,13 @@ static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
queue_size = tegra_ivc_total_queue_size(message_size);
offset = queue_size * index;
- iosys_map_set_vaddr_iomem(&rx, priv->rx.virt + offset);
- iosys_map_set_vaddr_iomem(&tx, priv->tx.virt + offset);
+ if (priv->rx.pool) {
+ iosys_map_set_vaddr_iomem(&rx, priv->rx.sram + offset);
+ iosys_map_set_vaddr_iomem(&tx, priv->tx.sram + offset);
+ } else {
+ iosys_map_set_vaddr(&rx, priv->rx.dram + offset);
+ iosys_map_set_vaddr(&tx, priv->tx.dram + offset);
+ }
err = tegra_ivc_init(channel->ivc, NULL, &rx, priv->rx.phys + offset, &tx,
priv->tx.phys + offset, 1, message_size, tegra186_bpmp_ivc_notify,
@@ -158,18 +168,72 @@ static void mbox_handle_rx(struct mbox_client *client, void *data)
tegra_bpmp_handle_rx(bpmp);
}
-static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+static void tegra186_bpmp_teardown_channels(struct tegra_bpmp *bpmp)
{
- struct tegra186_bpmp *priv;
+ struct tegra186_bpmp *priv = bpmp->priv;
unsigned int i;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ if (!bpmp->threaded_channels[i].bpmp)
+ continue;
+
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+ }
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+ if (priv->tx.pool) {
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.sram, 4096);
+ }
+}
+
+static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ struct device_node *np;
+ struct resource res;
+ size_t size;
int err;
- priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ np = of_parse_phandle(bpmp->dev->of_node, "memory-region", 0);
+ if (!np)
+ return -ENODEV;
- bpmp->priv = priv;
- priv->parent = bpmp;
+ err = of_address_to_resource(np, 0, &res);
+ if (err < 0) {
+ dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ return err;
+ }
+
+ size = resource_size(&res);
+
+ if (size < SZ_8K) {
+ dev_warn(bpmp->dev, "DRAM region must be larger than 8 KiB\n");
+ return -EINVAL;
+ }
+
+ priv->tx.phys = res.start;
+ priv->rx.phys = res.start + SZ_4K;
+
+ priv->tx.dram = devm_memremap(bpmp->dev, priv->tx.phys, size,
+ MEMREMAP_WC);
+ if (IS_ERR(priv->tx.dram)) {
+ err = PTR_ERR(priv->tx.dram);
+ dev_warn(bpmp->dev, "failed to map DRAM region: %d\n", err);
+ return err;
+ }
+
+ priv->rx.dram = priv->tx.dram + SZ_4K;
+
+ return 0;
+}
+
+static int tegra186_bpmp_sram_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ int err;
priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
if (!priv->tx.pool) {
@@ -177,8 +241,9 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
return -EPROBE_DEFER;
}
- priv->tx.virt = (void __iomem *)gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
- if (!priv->tx.virt) {
+ priv->tx.sram = (void __iomem *)gen_pool_dma_alloc(priv->tx.pool, 4096,
+ &priv->tx.phys);
+ if (!priv->tx.sram) {
dev_err(bpmp->dev, "failed to allocate from TX pool\n");
return -ENOMEM;
}
@@ -190,22 +255,45 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
goto free_tx;
}
- priv->rx.virt = (void __iomem *)gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
- if (!priv->rx.virt) {
+ priv->rx.sram = (void __iomem *)gen_pool_dma_alloc(priv->rx.pool, 4096,
+ &priv->rx.phys);
+ if (!priv->rx.sram) {
dev_err(bpmp->dev, "failed to allocate from RX pool\n");
err = -ENOMEM;
goto free_tx;
}
+ return 0;
+
+free_tx:
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
+
+ return err;
+}
+
+static int tegra186_bpmp_setup_channels(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+ int err;
+
+ err = tegra186_bpmp_dram_init(bpmp);
+ if (err == -ENODEV) {
+ err = tegra186_bpmp_sram_init(bpmp);
+ if (err < 0)
+ return err;
+ }
+
err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
bpmp->soc->channels.cpu_tx.offset);
if (err < 0)
- goto free_rx;
+ return err;
err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
bpmp->soc->channels.cpu_rx.offset);
- if (err < 0)
- goto cleanup_tx_channel;
+ if (err < 0) {
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+ return err;
+ }
for (i = 0; i < bpmp->threaded.count; i++) {
unsigned int index = bpmp->soc->channels.thread.offset + i;
@@ -213,9 +301,43 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
bpmp, index);
if (err < 0)
- goto cleanup_channels;
+ break;
}
+ if (err < 0)
+ tegra186_bpmp_teardown_channels(bpmp);
+
+ return err;
+}
+
+static void tegra186_bpmp_reset_channels(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+
+ /* reset message channels */
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv;
+ int err;
+
+ priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->parent = bpmp;
+ bpmp->priv = priv;
+
+ err = tegra186_bpmp_setup_channels(bpmp);
+ if (err < 0)
+ return err;
+
/* mbox registration */
priv->mbox.client.dev = bpmp->dev;
priv->mbox.client.rx_callback = mbox_handle_rx;
@@ -226,63 +348,27 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
if (IS_ERR(priv->mbox.channel)) {
err = PTR_ERR(priv->mbox.channel);
dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
- goto cleanup_channels;
+ tegra186_bpmp_teardown_channels(bpmp);
+ return err;
}
- tegra186_bpmp_channel_reset(bpmp->tx_channel);
- tegra186_bpmp_channel_reset(bpmp->rx_channel);
-
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+ tegra186_bpmp_reset_channels(bpmp);
return 0;
-
-cleanup_channels:
- for (i = 0; i < bpmp->threaded.count; i++) {
- if (!bpmp->threaded_channels[i].bpmp)
- continue;
-
- tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
- }
-
- tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
-cleanup_tx_channel:
- tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
-free_rx:
- gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
-free_tx:
- gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
-
- return err;
}
static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
- unsigned int i;
mbox_free_channel(priv->mbox.channel);
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
-
- tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
- tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
-
- gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
- gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+ tegra186_bpmp_teardown_channels(bpmp);
}
static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
{
- unsigned int i;
-
- /* reset message channels */
- tegra186_bpmp_channel_reset(bpmp->tx_channel);
- tegra186_bpmp_channel_reset(bpmp->rx_channel);
-
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+ tegra186_bpmp_reset_channels(bpmp);
return 0;
}
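
Note: in the Tegra BPMP changes above, channel memory can now come either from the SRAM gen_pool (as before) or from a reserved DRAM region described by a "memory-region" phandle; tegra186_bpmp_setup_channels() tries the DRAM path first and falls back to SRAM only when the phandle is absent (-ENODEV). A minimal model of that selection logic, with stand-in init functions:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool have_memory_region;	/* stands in for the DT "memory-region" phandle */

static int dram_init(void)
{
	return have_memory_region ? 0 : -ENODEV;
}

static int sram_init(void)
{
	return 0;	/* assume the SRAM pools are available in this model */
}

static int setup_channels(void)
{
	int err;

	err = dram_init();
	if (err == -ENODEV)	/* only "no DRAM region" falls back to SRAM */
		err = sram_init();

	return err;
}

int main(void)
{
	have_memory_region = false;
	printf("no memory-region:   setup_channels() = %d\n", setup_channels());

	have_memory_region = true;
	printf("with memory-region: setup_channels() = %d\n", setup_channels());
	return 0;
}
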
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 8b5e5daa9fae..17bd3590aaa2 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -735,6 +735,8 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (!bpmp->threaded_channels)
return -ENOMEM;
+ platform_set_drvdata(pdev, bpmp);
+
err = bpmp->soc->ops->init(bpmp);
if (err < 0)
return err;
@@ -758,8 +760,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);
- platform_set_drvdata(pdev, bpmp);
-
err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
if (err < 0)
goto free_mrq;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index a736db4a5825..9e585b5646df 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -942,8 +942,16 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
*/
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
{
- return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
- upper_32_bits(address), size, flags, NULL);
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
+ upper_32_bits(address), size, flags,
+ ret_payload);
+ if (ret_payload[0])
+ return -ret_payload[0];
+
+ return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
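
Note: the zynqmp change makes zynqmp_pm_fpga_load() look at the first word of the response payload, which carries the firmware status, and turn a non-zero status into a negative return value. A small sketch of that convention; the payload size and status values here are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAYLOAD_ARG_CNT	4U	/* illustrative payload size */

static int fpga_load_status(const uint32_t ret_payload[PAYLOAD_ARG_CNT], int ret)
{
	if (ret_payload[0])
		return -(int)ret_payload[0];	/* firmware reported an error */

	return ret;				/* otherwise the invoke result */
}

int main(void)
{
	uint32_t ok[PAYLOAD_ARG_CNT] = { 0 };
	uint32_t bad[PAYLOAD_ARG_CNT] = { 19 };	/* hypothetical status code */

	printf("ok:  %d\n", fpga_load_status(ok, 0));	/* -> 0 */
	printf("bad: %d\n", fpga_load_status(bad, 0));	/* -> -19 */
	return 0;
}
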