author     Linus Torvalds <torvalds@linux-foundation.org>  2019-01-05 22:30:37 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-01-05 22:30:37 +0300
commit     b23b0ea3708c3dec599966fc856836aca48835b9 (patch)
tree       d911a2ae33b7f546e8408e7a2aef845f65a3d881 /drivers
parent     078a5a4faf64fefaf13478a9091782432cad33fa (diff)
parent     00f8ccd0c95f4e604297057a5bccec86c0903d14 (diff)
download   linux-b23b0ea3708c3dec599966fc856836aca48835b9.tar.xz
Merge tag 'armsoc-late' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull more ARM SoC updates from Olof Johansson:
 "A few updates that we merged late but are low risk for regressions for
  other platforms (and a few other straggling patches):

   - I mis-tagged the 'drivers' branch and missed 3 patches. Merged in
     here. They're for a driver for the PL353 SRAM controller and a
     build fix for the Qualcomm SCM driver.

   - A new platform, RDA Micro RDA8810PL (Cortex-A5 w/ integrated
     Vivante GPU, 256MB RAM, Wifi). This includes some acked
     platform-specific drivers (serial, etc), and DTs for two boards
     with this SoC, OrangePi 2G and OrangePi i96.

   - i.MX8 is another new platform (NXP, 4x Cortex-A53 + Cortex-M4, 4K
     video playback offload). This is the first 64-bit i.MX SoC.

   - Some minor updates to Samsung boards (adding a few peripherals in
     DTs).

   - Small rework of SMP bootup on STi platforms.

   - A couple of TEE driver fixes.

   - A couple of new config options (bcm2835 thermal, Uniphier MDMAC)
     enabled in defconfigs"

* tag 'armsoc-late' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (27 commits)
  ARM: multi_v7_defconfig: enable CONFIG_UNIPHIER_MDMAC
  arm64: defconfig: Re-enable bcm2835-thermal driver
  MAINTAINERS: Add entry for RDA Micro SoC architecture
  tty: serial: Add RDA8810PL UART driver
  ARM: dts: rda8810pl: Add interrupt support for UART
  dt-bindings: serial: Document RDA Micro UART
  ARM: dts: rda8810pl: Add timer support
  ARM: dts: Add devicetree for OrangePi i96 board
  ARM: dts: Add devicetree for OrangePi 2G IoT board
  ARM: dts: Add devicetree for RDA8810PL SoC
  ARM: Prepare RDA8810PL SoC
  dt-bindings: arm: Document RDA8810PL and reference boards
  dt-bindings: Add RDA Micro vendor prefix
  ARM: sti: remove pen_release and boot_lock
  arm64: dts: exynos: Add Bluetooth chip to TM2(e) boards
  arm64: dts: imx8mq-evk: enable watchdog
  arm64: dts: imx8mq: add watchdog devices
  MAINTAINERS: add i.MX8 DT path to i.MX architecture
  arm64: add support for i.MX8M EVK board
  arm64: add basic DTS for i.MX8MQ
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/memory/Kconfig          9
-rw-r--r--  drivers/memory/Makefile         1
-rw-r--r--  drivers/memory/pl353-smc.c    463
-rw-r--r--  drivers/tee/optee/core.c        3
-rw-r--r--  drivers/tee/optee/supp.c       13
-rw-r--r--  drivers/tty/serial/Kconfig     19
-rw-r--r--  drivers/tty/serial/Makefile     1
-rw-r--r--  drivers/tty/serial/rda-uart.c 831
8 files changed, 1334 insertions(+), 6 deletions(-)
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 63389f075f1d..2d91b00e3591 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -145,6 +145,15 @@ config DA8XX_DDRCTL
Texas Instruments da8xx SoCs. It's used to tweak various memory
controller configuration options.
+config PL353_SMC
+ tristate "ARM PL35X Static Memory Controller (SMC) driver"
+ default y
+ depends on ARM
+ depends on ARM_AMBA
+ help
+ This driver is for the ARM PL351/PL353 Static Memory
+ Controller (SMC) module.
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index a01ab3e22f94..90161dec6fa5 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_MTK_SMI) += mtk-smi.o
obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
+obj-$(CONFIG_PL353_SMC) += pl353-smc.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
new file mode 100644
index 000000000000..73bd3023202f
--- /dev/null
+++ b/drivers/memory/pl353-smc.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL353 SMC driver
+ *
+ * Copyright (C) 2012 - 2018 Xilinx, Inc
+ * Author: Punnaiah Choudary Kalluri <punnaiah@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pl353-smc.h>
+#include <linux/amba/bus.h>
+
+/* Register definitions */
+#define PL353_SMC_MEMC_STATUS_OFFS 0 /* Controller status reg, RO */
+#define PL353_SMC_CFG_CLR_OFFS 0xC /* Clear config reg, WO */
+#define PL353_SMC_DIRECT_CMD_OFFS 0x10 /* Direct command reg, WO */
+#define PL353_SMC_SET_CYCLES_OFFS 0x14 /* Set cycles register, WO */
+#define PL353_SMC_SET_OPMODE_OFFS 0x18 /* Set opmode register, WO */
+#define PL353_SMC_ECC_STATUS_OFFS 0x400 /* ECC status register */
+#define PL353_SMC_ECC_MEMCFG_OFFS 0x404 /* ECC mem config reg */
+#define PL353_SMC_ECC_MEMCMD1_OFFS 0x408 /* ECC mem cmd1 reg */
+#define PL353_SMC_ECC_MEMCMD2_OFFS 0x40C /* ECC mem cmd2 reg */
+#define PL353_SMC_ECC_VALUE0_OFFS 0x418 /* ECC value 0 reg */
+
+/* Controller status register specific constants */
+#define PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT 6
+
+/* Clear configuration register specific constants */
+#define PL353_SMC_CFG_CLR_INT_CLR_1 0x10
+#define PL353_SMC_CFG_CLR_ECC_INT_DIS_1 0x40
+#define PL353_SMC_CFG_CLR_INT_DIS_1 0x2
+#define PL353_SMC_CFG_CLR_DEFAULT_MASK (PL353_SMC_CFG_CLR_INT_CLR_1 | \
+ PL353_SMC_CFG_CLR_ECC_INT_DIS_1 | \
+ PL353_SMC_CFG_CLR_INT_DIS_1)
+
+/* Set cycles register specific constants */
+#define PL353_SMC_SET_CYCLES_T0_MASK 0xF
+#define PL353_SMC_SET_CYCLES_T0_SHIFT 0
+#define PL353_SMC_SET_CYCLES_T1_MASK 0xF
+#define PL353_SMC_SET_CYCLES_T1_SHIFT 4
+#define PL353_SMC_SET_CYCLES_T2_MASK 0x7
+#define PL353_SMC_SET_CYCLES_T2_SHIFT 8
+#define PL353_SMC_SET_CYCLES_T3_MASK 0x7
+#define PL353_SMC_SET_CYCLES_T3_SHIFT 11
+#define PL353_SMC_SET_CYCLES_T4_MASK 0x7
+#define PL353_SMC_SET_CYCLES_T4_SHIFT 14
+#define PL353_SMC_SET_CYCLES_T5_MASK 0x7
+#define PL353_SMC_SET_CYCLES_T5_SHIFT 17
+#define PL353_SMC_SET_CYCLES_T6_MASK 0xF
+#define PL353_SMC_SET_CYCLES_T6_SHIFT 20
+
+/* ECC status register specific constants */
+#define PL353_SMC_ECC_STATUS_BUSY BIT(6)
+#define PL353_SMC_ECC_REG_SIZE_OFFS 4
+
+/* ECC memory config register specific constants */
+#define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
+#define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2
+#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0xC
+
+#define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \
+ (2 << 21)) /* UpdateRegs operation */
+
+#define PL353_NAND_ECC_CMD1 ((0x80) | /* Write command */ \
+ (0 << 8) | /* Read command */ \
+ (0x30 << 16) | /* Read End command */ \
+ (1 << 24)) /* Read End command valid */
+
+#define PL353_NAND_ECC_CMD2 ((0x85) | /* Write col change cmd */ \
+ (5 << 8) | /* Read col change cmd */ \
+ (0xE0 << 16) | /* Read col change end cmd */ \
+ (1 << 24)) /* Read col change end cmd valid */
+#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ)
+/**
+ * struct pl353_smc_data - Private smc driver structure
+ * @memclk: Pointer to the peripheral clock
+ * @aclk: Pointer to the APER clock
+ */
+struct pl353_smc_data {
+ struct clk *memclk;
+ struct clk *aclk;
+};
+
+/* SMC virtual register base */
+static void __iomem *pl353_smc_base;
+
+/**
+ * pl353_smc_set_buswidth - Set memory buswidth
+ * @bw: Memory buswidth (8 | 16)
+ * Return: 0 on success or negative errno.
+ */
+int pl353_smc_set_buswidth(unsigned int bw)
+{
+ if (bw != PL353_SMC_MEM_WIDTH_8 && bw != PL353_SMC_MEM_WIDTH_16)
+ return -EINVAL;
+
+ writel(bw, pl353_smc_base + PL353_SMC_SET_OPMODE_OFFS);
+ writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base +
+ PL353_SMC_DIRECT_CMD_OFFS);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pl353_smc_set_buswidth);
+
+/**
+ * pl353_smc_set_cycles - Set memory timing parameters
+ * @timings: NAND controller timing parameters
+ *
+ * Sets NAND chip specific timing parameters.
+ */
+void pl353_smc_set_cycles(u32 timings[])
+{
+ /*
+ * Set write pulse timing. This one is easy to extract:
+ *
+ * NWE_PULSE = tWP
+ */
+ timings[0] &= PL353_SMC_SET_CYCLES_T0_MASK;
+ timings[1] = (timings[1] & PL353_SMC_SET_CYCLES_T1_MASK) <<
+ PL353_SMC_SET_CYCLES_T1_SHIFT;
+ timings[2] = (timings[2] & PL353_SMC_SET_CYCLES_T2_MASK) <<
+ PL353_SMC_SET_CYCLES_T2_SHIFT;
+ timings[3] = (timings[3] & PL353_SMC_SET_CYCLES_T3_MASK) <<
+ PL353_SMC_SET_CYCLES_T3_SHIFT;
+ timings[4] = (timings[4] & PL353_SMC_SET_CYCLES_T4_MASK) <<
+ PL353_SMC_SET_CYCLES_T4_SHIFT;
+ timings[5] = (timings[5] & PL353_SMC_SET_CYCLES_T5_MASK) <<
+ PL353_SMC_SET_CYCLES_T5_SHIFT;
+ timings[6] = (timings[6] & PL353_SMC_SET_CYCLES_T6_MASK) <<
+ PL353_SMC_SET_CYCLES_T6_SHIFT;
+ timings[0] |= timings[1] | timings[2] | timings[3] |
+ timings[4] | timings[5] | timings[6];
+
+ writel(timings[0], pl353_smc_base + PL353_SMC_SET_CYCLES_OFFS);
+ writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base +
+ PL353_SMC_DIRECT_CMD_OFFS);
+}
+EXPORT_SYMBOL_GPL(pl353_smc_set_cycles);
+
+/**
+ * pl353_smc_ecc_is_busy - Read ecc busy flag
+ * Return: the ecc_status bit from the ecc_status register. 1 = busy, 0 = idle
+ */
+bool pl353_smc_ecc_is_busy(void)
+{
+ return ((readl(pl353_smc_base + PL353_SMC_ECC_STATUS_OFFS) &
+ PL353_SMC_ECC_STATUS_BUSY) == PL353_SMC_ECC_STATUS_BUSY);
+}
+EXPORT_SYMBOL_GPL(pl353_smc_ecc_is_busy);
+
+/**
+ * pl353_smc_get_ecc_val - Read ecc_valueN registers
+ * @ecc_reg: Index of the ecc_value reg (0..3)
+ * Return: the content of the requested ecc_value register.
+ *
+ * There are four valid ecc_value registers. The argument is truncated to stay
+ * within this valid boundary.
+ */
+u32 pl353_smc_get_ecc_val(int ecc_reg)
+{
+ u32 addr, reg;
+
+ addr = PL353_SMC_ECC_VALUE0_OFFS +
+ (ecc_reg * PL353_SMC_ECC_REG_SIZE_OFFS);
+ reg = readl(pl353_smc_base + addr);
+
+ return reg;
+}
+EXPORT_SYMBOL_GPL(pl353_smc_get_ecc_val);
+
+/**
+ * pl353_smc_get_nand_int_status_raw - Get NAND interrupt status bit
+ * Return: the raw_int_status1 bit from the memc_status register
+ */
+int pl353_smc_get_nand_int_status_raw(void)
+{
+ u32 reg;
+
+ reg = readl(pl353_smc_base + PL353_SMC_MEMC_STATUS_OFFS);
+ reg >>= PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT;
+ reg &= 1;
+
+ return reg;
+}
+EXPORT_SYMBOL_GPL(pl353_smc_get_nand_int_status_raw);
+
+/**
+ * pl353_smc_clr_nand_int - Clear NAND interrupt
+ */
+void pl353_smc_clr_nand_int(void)
+{
+ writel(PL353_SMC_CFG_CLR_INT_CLR_1,
+ pl353_smc_base + PL353_SMC_CFG_CLR_OFFS);
+}
+EXPORT_SYMBOL_GPL(pl353_smc_clr_nand_int);
+
+/**
+ * pl353_smc_set_ecc_mode - Set SMC ECC mode
+ * @mode: ECC mode (BYPASS, APB, MEM)
+ * Return: 0 on success or negative errno.
+ */
+int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode)
+{
+ u32 reg;
+ int ret = 0;
+
+ switch (mode) {
+ case PL353_SMC_ECCMODE_BYPASS:
+ case PL353_SMC_ECCMODE_APB:
+ case PL353_SMC_ECCMODE_MEM:
+
+ reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
+ reg &= ~PL353_SMC_ECC_MEMCFG_MODE_MASK;
+ reg |= mode << PL353_SMC_ECC_MEMCFG_MODE_SHIFT;
+ writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_mode);
+
+/**
+ * pl353_smc_set_ecc_pg_size - Set SMC ECC page size
+ * @pg_sz: ECC page size
+ * Return: 0 on success or negative errno.
+ */
+int pl353_smc_set_ecc_pg_size(unsigned int pg_sz)
+{
+ u32 reg, sz;
+
+ switch (pg_sz) {
+ case 0:
+ sz = 0;
+ break;
+ case SZ_512:
+ sz = 1;
+ break;
+ case SZ_1K:
+ sz = 2;
+ break;
+ case SZ_2K:
+ sz = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
+ reg &= ~PL353_SMC_ECC_MEMCFG_PGSIZE_MASK;
+ reg |= sz;
+ writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_pg_size);
+
+static int __maybe_unused pl353_smc_suspend(struct device *dev)
+{
+ struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev);
+
+ clk_disable(pl353_smc->memclk);
+ clk_disable(pl353_smc->aclk);
+
+ return 0;
+}
+
+static int __maybe_unused pl353_smc_resume(struct device *dev)
+{
+ int ret;
+ struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev);
+
+ ret = clk_enable(pl353_smc->aclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable axi domain clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(pl353_smc->memclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable memory clock.\n");
+ clk_disable(pl353_smc->aclk);
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct amba_driver pl353_smc_driver;
+
+static SIMPLE_DEV_PM_OPS(pl353_smc_dev_pm_ops, pl353_smc_suspend,
+ pl353_smc_resume);
+
+/**
+ * pl353_smc_init_nand_interface - Initialize the NAND interface
+ * @adev: Pointer to the amba_device struct
+ * @nand_node: Pointer to the pl353_nand device_node struct
+ */
+static void pl353_smc_init_nand_interface(struct amba_device *adev,
+ struct device_node *nand_node)
+{
+ unsigned long timeout;
+
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8);
+ writel(PL353_SMC_CFG_CLR_INT_CLR_1,
+ pl353_smc_base + PL353_SMC_CFG_CLR_OFFS);
+ writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base +
+ PL353_SMC_DIRECT_CMD_OFFS);
+
+ timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT;
+ /* Wait till the ECC operation is complete */
+ do {
+ if (pl353_smc_ecc_is_busy())
+ cpu_relax();
+ else
+ break;
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout))
+ return;
+
+ writel(PL353_NAND_ECC_CMD1,
+ pl353_smc_base + PL353_SMC_ECC_MEMCMD1_OFFS);
+ writel(PL353_NAND_ECC_CMD2,
+ pl353_smc_base + PL353_SMC_ECC_MEMCMD2_OFFS);
+}
+
+static const struct of_device_id pl353_smc_supported_children[] = {
+ {
+ .compatible = "cfi-flash"
+ },
+ {
+ .compatible = "arm,pl353-nand-r2p1",
+ .data = pl353_smc_init_nand_interface
+ },
+ {}
+};
+
+static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct pl353_smc_data *pl353_smc;
+ struct device_node *child;
+ struct resource *res;
+ int err;
+ struct device_node *of_node = adev->dev.of_node;
+ static void (*init)(struct amba_device *adev,
+ struct device_node *nand_node);
+ const struct of_device_id *match = NULL;
+
+ pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL);
+ if (!pl353_smc)
+ return -ENOMEM;
+
+ /* Get the NAND controller virtual address */
+ res = &adev->res;
+ pl353_smc_base = devm_ioremap_resource(&adev->dev, res);
+ if (IS_ERR(pl353_smc_base))
+ return PTR_ERR(pl353_smc_base);
+
+ pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk");
+ if (IS_ERR(pl353_smc->aclk)) {
+ dev_err(&adev->dev, "aclk clock not found.\n");
+ return PTR_ERR(pl353_smc->aclk);
+ }
+
+ pl353_smc->memclk = devm_clk_get(&adev->dev, "memclk");
+ if (IS_ERR(pl353_smc->memclk)) {
+ dev_err(&adev->dev, "memclk clock not found.\n");
+ return PTR_ERR(pl353_smc->memclk);
+ }
+
+ err = clk_prepare_enable(pl353_smc->aclk);
+ if (err) {
+ dev_err(&adev->dev, "Unable to enable AXI clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(pl353_smc->memclk);
+ if (err) {
+ dev_err(&adev->dev, "Unable to enable memory clock.\n");
+ goto out_clk_dis_aper;
+ }
+
+ amba_set_drvdata(adev, pl353_smc);
+
+ /* clear interrupts */
+ writel(PL353_SMC_CFG_CLR_DEFAULT_MASK,
+ pl353_smc_base + PL353_SMC_CFG_CLR_OFFS);
+
+ /* Find compatible children. Only a single child is supported */
+ for_each_available_child_of_node(of_node, child) {
+ match = of_match_node(pl353_smc_supported_children, child);
+ if (!match) {
+ dev_warn(&adev->dev, "unsupported child node\n");
+ continue;
+ }
+ break;
+ }
+ if (!match) {
+ dev_err(&adev->dev, "no matching children\n");
+ goto out_clk_disable;
+ }
+
+ init = match->data;
+ if (init)
+ init(adev, child);
+ of_platform_device_create(child, NULL, &adev->dev);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(pl353_smc->memclk);
+out_clk_dis_aper:
+ clk_disable_unprepare(pl353_smc->aclk);
+
+ return err;
+}
+
+static int pl353_smc_remove(struct amba_device *adev)
+{
+ struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev);
+
+ clk_disable_unprepare(pl353_smc->memclk);
+ clk_disable_unprepare(pl353_smc->aclk);
+
+ return 0;
+}
+
+static const struct amba_id pl353_ids[] = {
+ {
+ .id = 0x00041353,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, pl353_ids);
+
+static struct amba_driver pl353_smc_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "pl353-smc",
+ .pm = &pl353_smc_dev_pm_ops,
+ },
+ .id_table = pl353_ids,
+ .probe = pl353_smc_probe,
+ .remove = pl353_smc_remove,
+};
+
+module_amba_driver(pl353_smc_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("ARM PL353 SMC Driver");
+MODULE_LICENSE("GPL");
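
The SMC driver above only configures the controller; actual flash or SRAM access is left to the single child device created from pl353_smc_supported_children, which is expected to call the exported pl353_smc_* helpers declared in <linux/pl353-smc.h>. As a rough sketch of how a NAND child driver might drive that API (the function name, cycle counts and timeout below are illustrative assumptions, not taken from this series):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/pl353-smc.h>
	#include <linux/sizes.h>

	/* Hypothetical example: set up an 8-bit, 2K-page NAND behind the PL353 SMC. */
	static int example_nand_setup_smc(void)
	{
		/* t0..t6 cycle counts; real values must come from the NAND chip timings. */
		u32 timings[7] = { 5, 5, 2, 2, 3, 3, 4 };
		unsigned long timeout = jiffies + HZ;
		int ret;

		ret = pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8);
		if (ret)
			return ret;

		/* Program the set_cycles register and latch it with an UpdateRegs command. */
		pl353_smc_set_cycles(timings);

		ret = pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_APB);
		if (ret)
			return ret;

		ret = pl353_smc_set_ecc_pg_size(SZ_2K);
		if (ret)
			return ret;

		/* Wait for the ECC block to go idle before the first page operation. */
		while (pl353_smc_ecc_is_busy()) {
			if (time_after_eq(jiffies, timeout))
				return -ETIMEDOUT;
			udelay(1);
		}

		return 0;
	}
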
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 34dce850067b..e5efce3c08e2 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -631,6 +631,9 @@ static struct optee *optee_probe(struct device_node *np)
optee_enable_shm_cache(optee);
+ if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pr_info("dynamic shared memory is enabled\n");
+
pr_info("initialized driver\n");
return optee;
err:
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index df35fc01fd3e..43626e15703a 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -19,7 +19,7 @@
struct optee_supp_req {
struct list_head link;
- bool busy;
+ bool in_queue;
u32 func;
u32 ret;
size_t num_params;
@@ -54,7 +54,6 @@ void optee_supp_release(struct optee_supp *supp)
/* Abort all requests retrieved by supplicant */
idr_for_each_entry(&supp->idr, req, id) {
- req->busy = false;
idr_remove(&supp->idr, id);
req->ret = TEEC_ERROR_COMMUNICATION;
complete(&req->c);
@@ -63,6 +62,7 @@ void optee_supp_release(struct optee_supp *supp)
/* Abort all queued requests */
list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
list_del(&req->link);
+ req->in_queue = false;
req->ret = TEEC_ERROR_COMMUNICATION;
complete(&req->c);
}
@@ -103,6 +103,7 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
/* Insert the request in the request list */
mutex_lock(&supp->mutex);
list_add_tail(&req->link, &supp->reqs);
+ req->in_queue = true;
mutex_unlock(&supp->mutex);
/* Tell an eventual waiter there's a new request */
@@ -130,9 +131,10 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
* will serve all requests in a timely manner and
* interrupting them wouldn't make sense.
*/
- interruptable = !req->busy;
- if (!req->busy)
+ if (req->in_queue) {
list_del(&req->link);
+ req->in_queue = false;
+ }
}
mutex_unlock(&supp->mutex);
@@ -176,7 +178,7 @@ static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
return ERR_PTR(-ENOMEM);
list_del(&req->link);
- req->busy = true;
+ req->in_queue = false;
return req;
}
@@ -318,7 +320,6 @@ static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
if ((num_params - nm) != req->num_params)
return ERR_PTR(-EINVAL);
- req->busy = false;
idr_remove(&supp->idr, id);
supp->req_id = -1;
*num_meta = nm;
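
The supp.c change above replaces the old 'busy' flag with an 'in_queue' flag that is toggled exactly where the request is added to or removed from supp->reqs, so the requesting thread can tell, under the mutex, whether it still needs to unlink its own request after an interruption. A minimal stand-alone illustration of that pattern (generic names, not OP-TEE code):

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	struct example_req {
		struct list_head link;
		bool in_queue;	/* true only while linked on the queue */
	};

	static void example_queue(struct example_req *req, struct list_head *q,
				  struct mutex *lock)
	{
		mutex_lock(lock);
		list_add_tail(&req->link, q);
		req->in_queue = true;
		mutex_unlock(lock);
	}

	/* Safe to call whether or not someone else already dequeued the request. */
	static void example_cancel(struct example_req *req, struct mutex *lock)
	{
		mutex_lock(lock);
		if (req->in_queue) {
			list_del(&req->link);
			req->in_queue = false;
		}
		mutex_unlock(lock);
	}
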
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 32886c304641..67b9bf3b500e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1529,6 +1529,25 @@ config SERIAL_OWL_CONSOLE
Say 'Y' here if you wish to use Actions Semiconductor S500/S900 UART
as the system console.
+config SERIAL_RDA
+ bool "RDA Micro serial port support"
+ depends on ARCH_RDA || COMPILE_TEST
+ select SERIAL_CORE
+ help
+ This driver is for RDA8810PL SoC's UART.
+ Say 'Y' here if you wish to use the on-board serial port.
+ Otherwise, say 'N'.
+
+config SERIAL_RDA_CONSOLE
+ bool "Console on RDA Micro serial port"
+ depends on SERIAL_RDA=y
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ default y
+ help
+ Say 'Y' here if you wish to use the RDA8810PL UART as the system
+ console. Only earlycon is implemented currently.
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index daac675612df..8c303736b7e8 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SERIAL_MVEBU_UART) += mvebu-uart.o
obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o
obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o
obj-$(CONFIG_SERIAL_OWL) += owl-uart.o
+obj-$(CONFIG_SERIAL_RDA) += rda-uart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
new file mode 100644
index 000000000000..284623eefaeb
--- /dev/null
+++ b/drivers/tty/serial/rda-uart.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDA8810PL serial device driver
+ *
+ * Copyright RDA Microelectronics Company Limited
+ * Copyright (c) 2017 Andreas Färber
+ * Copyright (c) 2018 Manivannan Sadhasivam
+ */
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#define RDA_UART_PORT_NUM 3
+#define RDA_UART_DEV_NAME "ttyRDA"
+
+#define RDA_UART_CTRL 0x00
+#define RDA_UART_STATUS 0x04
+#define RDA_UART_RXTX_BUFFER 0x08
+#define RDA_UART_IRQ_MASK 0x0c
+#define RDA_UART_IRQ_CAUSE 0x10
+#define RDA_UART_IRQ_TRIGGERS 0x14
+#define RDA_UART_CMD_SET 0x18
+#define RDA_UART_CMD_CLR 0x1c
+
+/* UART_CTRL Bits */
+#define RDA_UART_ENABLE BIT(0)
+#define RDA_UART_DBITS_8 BIT(1)
+#define RDA_UART_TX_SBITS_2 BIT(2)
+#define RDA_UART_PARITY_EN BIT(3)
+#define RDA_UART_PARITY(x) (((x) & 0x3) << 4)
+#define RDA_UART_PARITY_ODD RDA_UART_PARITY(0)
+#define RDA_UART_PARITY_EVEN RDA_UART_PARITY(1)
+#define RDA_UART_PARITY_SPACE RDA_UART_PARITY(2)
+#define RDA_UART_PARITY_MARK RDA_UART_PARITY(3)
+#define RDA_UART_DIV_MODE BIT(20)
+#define RDA_UART_IRDA_EN BIT(21)
+#define RDA_UART_DMA_EN BIT(22)
+#define RDA_UART_FLOW_CNT_EN BIT(23)
+#define RDA_UART_LOOP_BACK_EN BIT(24)
+#define RDA_UART_RX_LOCK_ERR BIT(25)
+#define RDA_UART_RX_BREAK_LEN(x) (((x) & 0xf) << 28)
+
+/* UART_STATUS Bits */
+#define RDA_UART_RX_FIFO(x) (((x) & 0x7f) << 0)
+#define RDA_UART_RX_FIFO_MASK (0x7f << 0)
+#define RDA_UART_TX_FIFO(x) (((x) & 0x1f) << 8)
+#define RDA_UART_TX_FIFO_MASK (0x1f << 8)
+#define RDA_UART_TX_ACTIVE BIT(14)
+#define RDA_UART_RX_ACTIVE BIT(15)
+#define RDA_UART_RX_OVERFLOW_ERR BIT(16)
+#define RDA_UART_TX_OVERFLOW_ERR BIT(17)
+#define RDA_UART_RX_PARITY_ERR BIT(18)
+#define RDA_UART_RX_FRAMING_ERR BIT(19)
+#define RDA_UART_RX_BREAK_INT BIT(20)
+#define RDA_UART_DCTS BIT(24)
+#define RDA_UART_CTS BIT(25)
+#define RDA_UART_DTR BIT(28)
+#define RDA_UART_CLK_ENABLED BIT(31)
+
+/* UART_RXTX_BUFFER Bits */
+#define RDA_UART_RX_DATA(x) (((x) & 0xff) << 0)
+#define RDA_UART_TX_DATA(x) (((x) & 0xff) << 0)
+
+/* UART_IRQ_MASK Bits */
+#define RDA_UART_TX_MODEM_STATUS BIT(0)
+#define RDA_UART_RX_DATA_AVAILABLE BIT(1)
+#define RDA_UART_TX_DATA_NEEDED BIT(2)
+#define RDA_UART_RX_TIMEOUT BIT(3)
+#define RDA_UART_RX_LINE_ERR BIT(4)
+#define RDA_UART_TX_DMA_DONE BIT(5)
+#define RDA_UART_RX_DMA_DONE BIT(6)
+#define RDA_UART_RX_DMA_TIMEOUT BIT(7)
+#define RDA_UART_DTR_RISE BIT(8)
+#define RDA_UART_DTR_FALL BIT(9)
+
+/* UART_IRQ_CAUSE Bits */
+#define RDA_UART_TX_MODEM_STATUS_U BIT(16)
+#define RDA_UART_RX_DATA_AVAILABLE_U BIT(17)
+#define RDA_UART_TX_DATA_NEEDED_U BIT(18)
+#define RDA_UART_RX_TIMEOUT_U BIT(19)
+#define RDA_UART_RX_LINE_ERR_U BIT(20)
+#define RDA_UART_TX_DMA_DONE_U BIT(21)
+#define RDA_UART_RX_DMA_DONE_U BIT(22)
+#define RDA_UART_RX_DMA_TIMEOUT_U BIT(23)
+#define RDA_UART_DTR_RISE_U BIT(24)
+#define RDA_UART_DTR_FALL_U BIT(25)
+
+/* UART_TRIGGERS Bits */
+#define RDA_UART_RX_TRIGGER(x) (((x) & 0x1f) << 0)
+#define RDA_UART_TX_TRIGGER(x) (((x) & 0xf) << 8)
+#define RDA_UART_AFC_LEVEL(x) (((x) & 0x1f) << 16)
+
+/* UART_CMD_SET Bits */
+#define RDA_UART_RI BIT(0)
+#define RDA_UART_DCD BIT(1)
+#define RDA_UART_DSR BIT(2)
+#define RDA_UART_TX_BREAK_CONTROL BIT(3)
+#define RDA_UART_TX_FINISH_N_WAIT BIT(4)
+#define RDA_UART_RTS BIT(5)
+#define RDA_UART_RX_FIFO_RESET BIT(6)
+#define RDA_UART_TX_FIFO_RESET BIT(7)
+
+#define RDA_UART_TX_FIFO_SIZE 16
+
+static struct uart_driver rda_uart_driver;
+
+struct rda_uart_port {
+ struct uart_port port;
+ struct clk *clk;
+};
+
+#define to_rda_uart_port(port) container_of(port, struct rda_uart_port, port)
+
+static struct rda_uart_port *rda_uart_ports[RDA_UART_PORT_NUM];
+
+static inline void rda_uart_write(struct uart_port *port, u32 val,
+ unsigned int off)
+{
+ writel(val, port->membase + off);
+}
+
+static inline u32 rda_uart_read(struct uart_port *port, unsigned int off)
+{
+ return readl(port->membase + off);
+}
+
+static unsigned int rda_uart_tx_empty(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned int ret;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ val = rda_uart_read(port, RDA_UART_STATUS);
+ ret = (val & RDA_UART_TX_FIFO_MASK) ? TIOCSER_TEMT : 0;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+}
+
+static unsigned int rda_uart_get_mctrl(struct uart_port *port)
+{
+ unsigned int mctrl = 0;
+ u32 cmd_set, status;
+
+ cmd_set = rda_uart_read(port, RDA_UART_CMD_SET);
+ status = rda_uart_read(port, RDA_UART_STATUS);
+ if (cmd_set & RDA_UART_RTS)
+ mctrl |= TIOCM_RTS;
+ if (!(status & RDA_UART_CTS))
+ mctrl |= TIOCM_CTS;
+
+ return mctrl;
+}
+
+static void rda_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ u32 val;
+
+ if (mctrl & TIOCM_RTS) {
+ val = rda_uart_read(port, RDA_UART_CMD_SET);
+ rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_SET);
+ } else {
+ /* Clear RTS to stop receiving. */
+ val = rda_uart_read(port, RDA_UART_CMD_CLR);
+ rda_uart_write(port, (val | RDA_UART_RTS), RDA_UART_CMD_CLR);
+ }
+
+ val = rda_uart_read(port, RDA_UART_CTRL);
+
+ if (mctrl & TIOCM_LOOP)
+ val |= RDA_UART_LOOP_BACK_EN;
+ else
+ val &= ~RDA_UART_LOOP_BACK_EN;
+
+ rda_uart_write(port, val, RDA_UART_CTRL);
+}
+
+static void rda_uart_stop_tx(struct uart_port *port)
+{
+ u32 val;
+
+ val = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ val &= ~RDA_UART_TX_DATA_NEEDED;
+ rda_uart_write(port, val, RDA_UART_IRQ_MASK);
+
+ val = rda_uart_read(port, RDA_UART_CMD_SET);
+ val |= RDA_UART_TX_FIFO_RESET;
+ rda_uart_write(port, val, RDA_UART_CMD_SET);
+}
+
+static void rda_uart_stop_rx(struct uart_port *port)
+{
+ u32 val;
+
+ val = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ val &= ~(RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
+ rda_uart_write(port, val, RDA_UART_IRQ_MASK);
+
+ /* Read Rx buffer before reset to avoid Rx timeout interrupt */
+ val = rda_uart_read(port, RDA_UART_RXTX_BUFFER);
+
+ val = rda_uart_read(port, RDA_UART_CMD_SET);
+ val |= RDA_UART_RX_FIFO_RESET;
+ rda_uart_write(port, val, RDA_UART_CMD_SET);
+}
+
+static void rda_uart_start_tx(struct uart_port *port)
+{
+ u32 val;
+
+ if (uart_tx_stopped(port)) {
+ rda_uart_stop_tx(port);
+ return;
+ }
+
+ val = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ val |= RDA_UART_TX_DATA_NEEDED;
+ rda_uart_write(port, val, RDA_UART_IRQ_MASK);
+}
+
+static void rda_uart_change_baudrate(struct rda_uart_port *rda_port,
+ unsigned long baud)
+{
+ clk_set_rate(rda_port->clk, baud * 8);
+}
+
+static void rda_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct rda_uart_port *rda_port = to_rda_uart_port(port);
+ unsigned long flags;
+ unsigned int ctrl, cmd_set, cmd_clr, triggers;
+ unsigned int baud;
+ u32 irq_mask;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ baud = uart_get_baud_rate(port, termios, old, 9600, port->uartclk / 4);
+ rda_uart_change_baudrate(rda_port, baud);
+
+ ctrl = rda_uart_read(port, RDA_UART_CTRL);
+ cmd_set = rda_uart_read(port, RDA_UART_CMD_SET);
+ cmd_clr = rda_uart_read(port, RDA_UART_CMD_CLR);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ case CS6:
+ dev_warn(port->dev, "bit size not supported, using 7 bits\n");
+ /* Fall through */
+ case CS7:
+ ctrl &= ~RDA_UART_DBITS_8;
+ break;
+ default:
+ ctrl |= RDA_UART_DBITS_8;
+ break;
+ }
+
+ /* stop bits */
+ if (termios->c_cflag & CSTOPB)
+ ctrl |= RDA_UART_TX_SBITS_2;
+ else
+ ctrl &= ~RDA_UART_TX_SBITS_2;
+
+ /* parity check */
+ if (termios->c_cflag & PARENB) {
+ ctrl |= RDA_UART_PARITY_EN;
+
+ /* Mark or Space parity */
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ ctrl |= RDA_UART_PARITY_MARK;
+ else
+ ctrl |= RDA_UART_PARITY_SPACE;
+ } else if (termios->c_cflag & PARODD) {
+ ctrl |= RDA_UART_PARITY_ODD;
+ } else {
+ ctrl |= RDA_UART_PARITY_EVEN;
+ }
+ } else {
+ ctrl &= ~RDA_UART_PARITY_EN;
+ }
+
+ /* Hardware handshake (RTS/CTS) */
+ if (termios->c_cflag & CRTSCTS) {
+ ctrl |= RDA_UART_FLOW_CNT_EN;
+ cmd_set |= RDA_UART_RTS;
+ } else {
+ ctrl &= ~RDA_UART_FLOW_CNT_EN;
+ cmd_clr |= RDA_UART_RTS;
+ }
+
+ ctrl |= RDA_UART_ENABLE;
+ ctrl &= ~RDA_UART_DMA_EN;
+
+ triggers = (RDA_UART_AFC_LEVEL(20) | RDA_UART_RX_TRIGGER(16));
+ irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
+
+ rda_uart_write(port, triggers, RDA_UART_IRQ_TRIGGERS);
+ rda_uart_write(port, ctrl, RDA_UART_CTRL);
+ rda_uart_write(port, cmd_set, RDA_UART_CMD_SET);
+ rda_uart_write(port, cmd_clr, RDA_UART_CMD_CLR);
+
+ rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK);
+
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /* update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void rda_uart_send_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int ch;
+ u32 val;
+
+ if (uart_tx_stopped(port))
+ return;
+
+ if (port->x_char) {
+ while (!(rda_uart_read(port, RDA_UART_STATUS) &
+ RDA_UART_TX_FIFO_MASK))
+ cpu_relax();
+
+ rda_uart_write(port, port->x_char, RDA_UART_RXTX_BUFFER);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+
+ while (rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK) {
+ if (uart_circ_empty(xmit))
+ break;
+
+ ch = xmit->buf[xmit->tail];
+ rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
+ xmit->tail = (xmit->tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (!uart_circ_empty(xmit)) {
+ /* Re-enable Tx FIFO interrupt */
+ val = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ val |= RDA_UART_TX_DATA_NEEDED;
+ rda_uart_write(port, val, RDA_UART_IRQ_MASK);
+ }
+}
+
+static void rda_uart_receive_chars(struct uart_port *port)
+{
+ u32 status, val;
+
+ status = rda_uart_read(port, RDA_UART_STATUS);
+ while ((status & RDA_UART_RX_FIFO_MASK)) {
+ char flag = TTY_NORMAL;
+
+ if (status & RDA_UART_RX_PARITY_ERR) {
+ port->icount.parity++;
+ flag = TTY_PARITY;
+ }
+
+ if (status & RDA_UART_RX_FRAMING_ERR) {
+ port->icount.frame++;
+ flag = TTY_FRAME;
+ }
+
+ if (status & RDA_UART_RX_OVERFLOW_ERR) {
+ port->icount.overrun++;
+ flag = TTY_OVERRUN;
+ }
+
+ val = rda_uart_read(port, RDA_UART_RXTX_BUFFER);
+ val &= 0xff;
+
+ port->icount.rx++;
+ tty_insert_flip_char(&port->state->port, val, flag);
+
+ status = rda_uart_read(port, RDA_UART_STATUS);
+ }
+
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
+}
+
+static irqreturn_t rda_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned long flags;
+ u32 val, irq_mask;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Clear IRQ cause */
+ val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
+ rda_uart_write(port, val, RDA_UART_IRQ_CAUSE);
+
+ if (val & (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT))
+ rda_uart_receive_chars(port);
+
+ if (val & (RDA_UART_TX_DATA_NEEDED)) {
+ irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ irq_mask &= ~RDA_UART_TX_DATA_NEEDED;
+ rda_uart_write(port, irq_mask, RDA_UART_IRQ_MASK);
+
+ rda_uart_send_chars(port);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int rda_uart_startup(struct uart_port *port)
+{
+ unsigned long flags;
+ int ret;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+ rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = request_irq(port->irq, rda_interrupt, IRQF_NO_SUSPEND,
+ "rda-uart", port);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ val = rda_uart_read(port, RDA_UART_CTRL);
+ val |= RDA_UART_ENABLE;
+ rda_uart_write(port, val, RDA_UART_CTRL);
+
+ /* enable rx interrupt */
+ val = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ val |= (RDA_UART_RX_DATA_AVAILABLE | RDA_UART_RX_TIMEOUT);
+ rda_uart_write(port, val, RDA_UART_IRQ_MASK);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+}
+
+static void rda_uart_shutdown(struct uart_port *port)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ rda_uart_stop_tx(port);
+ rda_uart_stop_rx(port);
+
+ val = rda_uart_read(port, RDA_UART_CTRL);
+ val &= ~RDA_UART_ENABLE;
+ rda_uart_write(port, val, RDA_UART_CTRL);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *rda_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_RDA) ? "rda-uart" : NULL;
+}
+
+static int rda_uart_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ if (!devm_request_mem_region(port->dev, port->mapbase,
+ resource_size(res), dev_name(port->dev)))
+ return -EBUSY;
+
+ if (port->flags & UPF_IOREMAP) {
+ port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ resource_size(res));
+ if (!port->membase)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void rda_uart_config_port(struct uart_port *port, int flags)
+{
+ unsigned long irq_flags;
+
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_RDA;
+ rda_uart_request_port(port);
+ }
+
+ spin_lock_irqsave(&port->lock, irq_flags);
+
+ /* Clear mask, so no surprise interrupts. */
+ rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
+
+ /* Clear status register */
+ rda_uart_write(port, 0, RDA_UART_STATUS);
+
+ spin_unlock_irqrestore(&port->lock, irq_flags);
+}
+
+static void rda_uart_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return;
+
+ if (port->flags & UPF_IOREMAP) {
+ devm_release_mem_region(port->dev, port->mapbase,
+ resource_size(res));
+ devm_iounmap(port->dev, port->membase);
+ port->membase = NULL;
+ }
+}
+
+static int rda_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (port->type != PORT_RDA)
+ return -EINVAL;
+
+ if (port->irq != ser->irq)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct uart_ops rda_uart_ops = {
+ .tx_empty = rda_uart_tx_empty,
+ .get_mctrl = rda_uart_get_mctrl,
+ .set_mctrl = rda_uart_set_mctrl,
+ .start_tx = rda_uart_start_tx,
+ .stop_tx = rda_uart_stop_tx,
+ .stop_rx = rda_uart_stop_rx,
+ .startup = rda_uart_startup,
+ .shutdown = rda_uart_shutdown,
+ .set_termios = rda_uart_set_termios,
+ .type = rda_uart_type,
+ .request_port = rda_uart_request_port,
+ .release_port = rda_uart_release_port,
+ .config_port = rda_uart_config_port,
+ .verify_port = rda_uart_verify_port,
+};
+
+#ifdef CONFIG_SERIAL_RDA_CONSOLE
+
+static void rda_console_putchar(struct uart_port *port, int ch)
+{
+ if (!port->membase)
+ return;
+
+ while (!(rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK))
+ cpu_relax();
+
+ rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER);
+}
+
+static void rda_uart_port_write(struct uart_port *port, const char *s,
+ u_int count)
+{
+ u32 old_irq_mask;
+ unsigned long flags;
+ int locked;
+
+ local_irq_save(flags);
+
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else {
+ spin_lock(&port->lock);
+ locked = 1;
+ }
+
+ old_irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
+ rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
+
+ uart_console_write(port, s, count, rda_console_putchar);
+
+ /* wait until all contents have been sent out */
+ while (!(rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK))
+ cpu_relax();
+
+ rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
+
+ if (locked)
+ spin_unlock(&port->lock);
+
+ local_irq_restore(flags);
+}
+
+static void rda_uart_console_write(struct console *co, const char *s,
+ u_int count)
+{
+ struct rda_uart_port *rda_port;
+
+ rda_port = rda_uart_ports[co->index];
+ if (!rda_port)
+ return;
+
+ rda_uart_port_write(&rda_port->port, s, count);
+}
+
+static int rda_uart_console_setup(struct console *co, char *options)
+{
+ struct rda_uart_port *rda_port;
+ int baud = 921600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= RDA_UART_PORT_NUM)
+ return -EINVAL;
+
+ rda_port = rda_uart_ports[co->index];
+ if (!rda_port || !rda_port->port.membase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&rda_port->port, co, baud, parity, bits, flow);
+}
+
+static struct console rda_uart_console = {
+ .name = RDA_UART_DEV_NAME,
+ .write = rda_uart_console_write,
+ .device = uart_console_device,
+ .setup = rda_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &rda_uart_driver,
+};
+
+static int __init rda_uart_console_init(void)
+{
+ register_console(&rda_uart_console);
+
+ return 0;
+}
+console_initcall(rda_uart_console_init);
+
+static void rda_uart_early_console_write(struct console *co,
+ const char *s,
+ u_int count)
+{
+ struct earlycon_device *dev = co->data;
+
+ rda_uart_port_write(&dev->port, s, count);
+}
+
+static int __init
+rda_uart_early_console_setup(struct earlycon_device *device, const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = rda_uart_early_console_write;
+
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(rda, "rda,8810pl-uart",
+ rda_uart_early_console_setup);
+
+#define RDA_UART_CONSOLE (&rda_uart_console)
+#else
+#define RDA_UART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_RDA_CONSOLE */
+
+static struct uart_driver rda_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "rda-uart",
+ .dev_name = RDA_UART_DEV_NAME,
+ .nr = RDA_UART_PORT_NUM,
+ .cons = RDA_UART_CONSOLE,
+};
+
+static const struct of_device_id rda_uart_dt_matches[] = {
+ { .compatible = "rda,8810pl-uart" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rda_uart_dt_matches);
+
+static int rda_uart_probe(struct platform_device *pdev)
+{
+ struct resource *res_mem;
+ struct rda_uart_port *rda_port;
+ int ret, irq;
+
+ if (pdev->dev.of_node)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (pdev->id < 0 || pdev->id >= RDA_UART_PORT_NUM) {
+ dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
+ return -EINVAL;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem) {
+ dev_err(&pdev->dev, "could not get mem\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "could not get irq\n");
+ return irq;
+ }
+
+ if (rda_uart_ports[pdev->id]) {
+ dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
+ return -EBUSY;
+ }
+
+ rda_port = devm_kzalloc(&pdev->dev, sizeof(*rda_port), GFP_KERNEL);
+ if (!rda_port)
+ return -ENOMEM;
+
+ rda_port->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rda_port->clk)) {
+ dev_err(&pdev->dev, "could not get clk\n");
+ return PTR_ERR(rda_port->clk);
+ }
+
+ rda_port->port.dev = &pdev->dev;
+ rda_port->port.regshift = 0;
+ rda_port->port.line = pdev->id;
+ rda_port->port.type = PORT_RDA;
+ rda_port->port.iotype = UPIO_MEM;
+ rda_port->port.mapbase = res_mem->start;
+ rda_port->port.irq = irq;
+ rda_port->port.uartclk = clk_get_rate(rda_port->clk);
+ if (rda_port->port.uartclk == 0) {
+ dev_err(&pdev->dev, "clock rate is zero\n");
+ return -EINVAL;
+ }
+ rda_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP |
+ UPF_LOW_LATENCY;
+ rda_port->port.x_char = 0;
+ rda_port->port.fifosize = RDA_UART_TX_FIFO_SIZE;
+ rda_port->port.ops = &rda_uart_ops;
+
+ rda_uart_ports[pdev->id] = rda_port;
+ platform_set_drvdata(pdev, rda_port);
+
+ ret = uart_add_one_port(&rda_uart_driver, &rda_port->port);
+ if (ret)
+ rda_uart_ports[pdev->id] = NULL;
+
+ return ret;
+}
+
+static int rda_uart_remove(struct platform_device *pdev)
+{
+ struct rda_uart_port *rda_port = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&rda_uart_driver, &rda_port->port);
+ rda_uart_ports[pdev->id] = NULL;
+
+ return 0;
+}
+
+static struct platform_driver rda_uart_platform_driver = {
+ .probe = rda_uart_probe,
+ .remove = rda_uart_remove,
+ .driver = {
+ .name = "rda-uart",
+ .of_match_table = rda_uart_dt_matches,
+ },
+};
+
+static int __init rda_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&rda_uart_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&rda_uart_platform_driver);
+ if (ret)
+ uart_unregister_driver(&rda_uart_driver);
+
+ return ret;
+}
+
+static void __exit rda_uart_exit(void)
+{
+ platform_driver_unregister(&rda_uart_platform_driver);
+ uart_unregister_driver(&rda_uart_driver);
+}
+
+module_init(rda_uart_init);
+module_exit(rda_uart_exit);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("RDA8810PL serial device driver");
+MODULE_LICENSE("GPL");
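
Once the driver is registered, its ports are exposed as /dev/ttyRDA0../dev/ttyRDA2 (per RDA_UART_DEV_NAME and RDA_UART_PORT_NUM) and are used through the normal termios interface. For completeness, a small userspace sketch; the device node and the 921600 baud value simply mirror the console defaults above, and nothing in it is part of the patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <termios.h>
	#include <unistd.h>

	int main(void)
	{
		struct termios tio;
		int fd = open("/dev/ttyRDA0", O_RDWR | O_NOCTTY);

		if (fd < 0) {
			perror("open /dev/ttyRDA0");
			return 1;
		}

		if (tcgetattr(fd, &tio) < 0) {
			perror("tcgetattr");
			close(fd);
			return 1;
		}

		/* Raw mode, 921600 baud, 8N1, no flow control. */
		cfmakeraw(&tio);
		cfsetispeed(&tio, B921600);
		cfsetospeed(&tio, B921600);
		tio.c_cflag &= ~(CSTOPB | PARENB | CRTSCTS);

		if (tcsetattr(fd, TCSANOW, &tio) < 0) {
			perror("tcsetattr");
			close(fd);
			return 1;
		}

		write(fd, "hello\n", 6);
		close(fd);
		return 0;
	}
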