Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 3
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/char/Kconfig | 9
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 1
-rw-r--r-- drivers/char/ipmi/kcs_bmc_aspeed.c | 1
-rw-r--r-- drivers/char/mem.c | 11
-rw-r--r-- drivers/clk/clk-ast2600.c | 2
-rw-r--r-- drivers/edac/Kconfig | 7
-rw-r--r-- drivers/edac/Makefile | 1
-rw-r--r-- drivers/edac/npcm7xx_edac.c | 424
-rw-r--r-- drivers/fsi/fsi-core.c | 31
-rw-r--r-- drivers/fsi/fsi-master-aspeed.c | 134
-rw-r--r-- drivers/fsi/fsi-master-ast-cf.c | 5
-rw-r--r-- drivers/fsi/fsi-master-gpio.c | 5
-rw-r--r-- drivers/fsi/fsi-master-hub.c | 13
-rw-r--r-- drivers/fsi/fsi-master.h | 3
-rw-r--r-- drivers/fsi/fsi-occ.c | 126
-rw-r--r-- drivers/fsi/fsi-sbefifo.c | 10
-rw-r--r-- drivers/hwmon/Kconfig | 28
-rw-r--r-- drivers/hwmon/Makefile | 2
-rw-r--r-- drivers/hwmon/occ/common.c | 65
-rw-r--r-- drivers/hwmon/peci-cputemp.c | 472
-rw-r--r-- drivers/hwmon/peci-dimmtemp.c | 425
-rw-r--r-- drivers/hwmon/peci-hwmon.h | 48
-rw-r--r-- drivers/hwmon/pmbus/max31785.c | 497
-rw-r--r-- drivers/hwmon/pmbus/pmbus_core.c | 22
-rw-r--r-- drivers/i2c/busses/Kconfig | 9
-rw-r--r-- drivers/i2c/busses/Makefile | 1
-rw-r--r-- drivers/i2c/busses/i2c-fsi.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-npcm7xx.c | 2342
-rw-r--r-- drivers/leds/leds-pca955x.c | 8
-rw-r--r-- drivers/mfd/Kconfig | 17
-rw-r--r-- drivers/mfd/Makefile | 1
-rw-r--r-- drivers/mfd/intel-peci-client.c | 148
-rw-r--r-- drivers/misc/Kconfig | 22
-rw-r--r-- drivers/misc/Makefile | 3
-rw-r--r-- drivers/misc/mctp-lpc.c | 443
-rw-r--r-- drivers/misc/npcm7xx-lpc-bpc.c | 394
-rw-r--r-- drivers/misc/npcm7xx-pci-mbox.c | 288
-rw-r--r-- drivers/mtd/spi-nor/controllers/aspeed-smc.c | 588
-rw-r--r-- drivers/mtd/spi-nor/macronix.c | 5
-rw-r--r-- drivers/mtd/spi-nor/winbond.c | 2
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/nuvoton/Kconfig | 30
-rw-r--r-- drivers/net/ethernet/nuvoton/Makefile | 6
-rw-r--r-- drivers/net/ethernet/nuvoton/npcm7xx_emc.c | 2090
-rw-r--r-- drivers/peci/Kconfig | 37
-rw-r--r-- drivers/peci/Makefile | 11
-rw-r--r-- drivers/peci/busses/Kconfig | 34
-rw-r--r-- drivers/peci/busses/Makefile | 7
-rw-r--r-- drivers/peci/busses/peci-aspeed.c | 484
-rw-r--r-- drivers/peci/busses/peci-npcm.c | 406
-rw-r--r-- drivers/peci/peci-core.c | 2089
-rw-r--r-- drivers/peci/peci-dev.c | 348
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 7
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 25
-rw-r--r-- drivers/reset/reset-simple.c | 1
-rw-r--r-- drivers/soc/aspeed/Kconfig | 16
-rw-r--r-- drivers/soc/aspeed/Makefile | 2
-rw-r--r-- drivers/soc/aspeed/aspeed-bmc-misc.c | 190
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-ctrl.c | 31
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-snoop.c | 2
-rw-r--r-- drivers/soc/aspeed/aspeed-xdma.c | 1205
64 files changed, 13491 insertions, 156 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index dcecc9f6e33f..1bd21494d4cd 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -235,4 +235,7 @@ source "drivers/interconnect/Kconfig"
source "drivers/counter/Kconfig"
source "drivers/most/Kconfig"
+
+source "drivers/peci/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index c0cd1b9075e3..bbc6d661b9de 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -188,3 +188,4 @@ obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
obj-$(CONFIG_COUNTER) += counter/
obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_PECI) += peci/
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d4665fe9ccd2..344c532f6961 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -333,6 +333,15 @@ config DEVMEM
memory.
When in doubt, say "Y".
+config DEVMEM_BOOTPARAM
+ bool "mem.devmem boot parameter"
+ depends on DEVMEM
+ default n
+ help
+ This option adds a 'mem.devmem' kernel command-line parameter. The
+ /dev/mem device is created only when the parameter is set.
+ When in doubt, say "N".
+
config DEVKMEM
bool "/dev/kmem virtual device support"
# On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index d36aeacb290e..1c3eba86834f 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -511,6 +511,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
static const struct of_device_id bt_bmc_match[] = {
{ .compatible = "aspeed,ast2400-ibt-bmc" },
{ .compatible = "aspeed,ast2500-ibt-bmc" },
+ { .compatible = "aspeed,ast2600-ibt-bmc" },
{ },
};
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index a140203c079b..2a2508d54e2a 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -400,6 +400,7 @@ static const struct of_device_id ast_kcs_bmc_match[] = {
{ .compatible = "aspeed,ast2500-kcs-bmc" },
{ .compatible = "aspeed,ast2400-kcs-bmc-v2" },
{ .compatible = "aspeed,ast2500-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2600-kcs-bmc" },
{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6b56bff9b68c..06611d177d4d 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -10,6 +10,7 @@
*/
#include <linux/mm.h>
+#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -994,6 +995,12 @@ static char *mem_devnode(struct device *dev, umode_t *mode)
return NULL;
}
+#ifdef CONFIG_DEVMEM_BOOTPARAM
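+/*
+ * mem.c is built in, so this parameter is set as "mem.devmem" on the
+ * kernel command line rather than as a loadable-module option.
+ */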
+static bool devmem;
+module_param(devmem, bool, 0444);
+MODULE_PARM_DESC(devmem, "kernel parameter to activate /dev/mem");
+#endif
+
static struct class *mem_class;
static int devmem_fs_init_fs_context(struct fs_context *fc)
@@ -1054,6 +1061,10 @@ static int __init chr_dev_init(void)
if (!devlist[minor].name)
continue;
+#ifdef CONFIG_DEVMEM_BOOTPARAM
+ if (minor == DEVMEM_MINOR && !devmem)
+ continue;
+#endif
/*
* Create /dev/port?
*/
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 177368cac6dd..bbacaccad554 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -64,7 +64,7 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
[ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
/* vclk parent - dclk/d1clk/hclk/mclk */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
- [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
+ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
/* From dpll */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REF0CLK] = { 6, -1, "ref0clk-gate", "clkin", CLK_IS_CRITICAL },
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fe2eb892a1bd..2ac119cab6cb 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -530,4 +530,11 @@ config EDAC_DMC520
Support for error detection and correction on the
SoCs with ARM DMC-520 DRAM controller.
+config EDAC_NPCM7XX
+ tristate "Nuvoton NPCM7xx DDR Memory Controller"
+ depends on ARCH_NPCM7XX
+ help
+ Support for error detection and correction on the
+ Nuvoton NPCM7xx DDR memory controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 269e15118cea..25c77763d666 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
+obj-$(CONFIG_EDAC_NPCM7XX) += npcm7xx_edac.o
diff --git a/drivers/edac/npcm7xx_edac.c b/drivers/edac/npcm7xx_edac.c
new file mode 100644
index 000000000000..2d2deb81e49c
--- /dev/null
+++ b/drivers/edac/npcm7xx_edac.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Quanta Computer Inc.
+ */
+
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include "edac_module.h"
+
+#define ECC_ENABLE BIT(24)
+#define ECC_EN_INT_MASK 0x7fffff87
+
+#define INT_STATUS_ADDR 116
+#define INT_ACK_ADDR 117
+#define INT_MASK_ADDR 118
+
+#define ECC_EN_ADDR 93
+#define ECC_C_ADDR_ADDR 98
+#define ECC_C_DATA_ADDR 100
+#define ECC_C_ID_ADDR 101
+#define ECC_C_SYND_ADDR 99
+#define ECC_U_ADDR_ADDR 95
+#define ECC_U_DATA_ADDR 97
+#define ECC_U_ID_ADDR 101
+#define ECC_U_SYND_ADDR 96
+
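+/*
+ * The *_ADDR values above are 32-bit register word indices; accesses
+ * below use a byte offset of 4 * index.
+ */
+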
+#define ECC_ERROR -1
+#define EDAC_MSG_SIZE 256
+#define EDAC_MOD_NAME "npcm7xx-edac"
+
+struct ecc_error_signature_info {
+ u32 ecc_addr;
+ u32 ecc_data;
+ u32 ecc_id;
+ u32 ecc_synd;
+};
+
+struct npcm7xx_ecc_int_status {
+ u32 int_mask;
+ u32 int_status;
+ u32 int_ack;
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct ecc_error_signature_info ceinfo;
+ struct ecc_error_signature_info ueinfo;
+};
+
+struct npcm7xx_edac_priv {
+ void __iomem *baseaddr;
+ char message[EDAC_MSG_SIZE];
+ struct npcm7xx_ecc_int_status stat;
+};
+
+/**
+ * npcm7xx_edac_get_ecc_syndrom - Get the current ecc error info
+ * @base: Pointer to the base address of the ddr memory controller
+ * @p: Pointer to the Nuvoton ecc status structure
+ *
+ * Determines whether any ECC error has occurred.
+ *
+ * Return: ECC detection status
+ */
+static int npcm7xx_edac_get_ecc_syndrom(void __iomem *base,
+ struct npcm7xx_ecc_int_status *p)
+{
+ int status = 0;
+ u32 int_status = 0;
+
+ int_status = readl(base + 4*INT_STATUS_ADDR);
+ writel(int_status, base + 4*INT_ACK_ADDR);
+ edac_dbg(3, "int_status: %#08x\n", int_status);
+
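+ /* Bits 3/4 flag correctable errors; bits 5/6 flag uncorrectable ones. */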
+ if (int_status & BIT(6)) {
+ edac_dbg(3, "Bit 6: multiple uncorrectable errors detected\n");
+ p->ue_cnt++;
+ status = ECC_ERROR;
+ }
+
+ if (int_status & BIT(5)) {
+ edac_dbg(3, "Bit 5: an uncorrectable error detected\n");
+ p->ue_cnt++;
+ status = ECC_ERROR;
+ }
+
+ if (int_status & BIT(4)) {
+ edac_dbg(3, "Bit 4: multiple correctable errors detected\n");
+ p->ce_cnt++;
+ status = ECC_ERROR;
+ }
+
+ if (int_status & BIT(3)) {
+ edac_dbg(3, "Bit 3: a correctable error detected\n");
+ p->ce_cnt++;
+ status = ECC_ERROR;
+ }
+
+ if (status == ECC_ERROR) {
+ u32 ecc_id;
+
+ p->ceinfo.ecc_addr = readl(base + 4*ECC_C_ADDR_ADDR);
+ p->ceinfo.ecc_data = readl(base + 4*ECC_C_DATA_ADDR);
+ p->ceinfo.ecc_synd = readl(base + 4*ECC_C_SYND_ADDR);
+
+ p->ueinfo.ecc_addr = readl(base + 4*ECC_U_ADDR_ADDR);
+ p->ueinfo.ecc_data = readl(base + 4*ECC_U_DATA_ADDR);
+ p->ueinfo.ecc_synd = readl(base + 4*ECC_U_SYND_ADDR);
+
+ /* ECC_C_ID_ADDR has same value as ECC_U_ID_ADDR */
+ ecc_id = readl(base + 4*ECC_C_ID_ADDR);
+ p->ueinfo.ecc_id = ecc_id & 0xffff;
+ p->ceinfo.ecc_id = ecc_id >> 16;
+ }
+
+ return status;
+}
+
+/**
+ * npcm7xx_edac_handle_error - Handle controller error types CE and UE
+ * @mci: Pointer to the edac memory controller instance
+ * @p: Pointer to the Nuvoton ecc status structure
+ *
+ * Handles the controller ECC correctable and uncorrectable errors.
+ */
+static void npcm7xx_edac_handle_error(struct mem_ctl_info *mci,
+ struct npcm7xx_ecc_int_status *p)
+{
+ struct npcm7xx_edac_priv *priv = mci->pvt_info;
+ u32 page, offset;
+
+ if (p->ce_cnt) {
+ snprintf(priv->message, EDAC_MSG_SIZE,
+ "DDR ECC: synd=%#08x addr=%#08x data=%#08x source_id=%#08x ",
+ p->ceinfo.ecc_synd, p->ceinfo.ecc_addr,
+ p->ceinfo.ecc_data, p->ceinfo.ecc_id);
+
+ page = p->ceinfo.ecc_addr >> PAGE_SHIFT;
+ offset = p->ceinfo.ecc_addr & ~PAGE_MASK;
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ p->ce_cnt, page, offset,
+ p->ceinfo.ecc_synd,
+ 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (p->ue_cnt) {
+ snprintf(priv->message, EDAC_MSG_SIZE,
+ "DDR ECC: synd=%#08x addr=%#08x data=%#08x source_id=%#08x ",
+ p->ueinfo.ecc_synd, p->ueinfo.ecc_addr,
+ p->ueinfo.ecc_data, p->ueinfo.ecc_id);
+
+ page = p->ueinfo.ecc_addr >> PAGE_SHIFT;
+ offset = p->ueinfo.ecc_addr & ~PAGE_MASK;
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ p->ue_cnt, page, offset,
+ p->ueinfo.ecc_synd,
+ 0, 0, -1,
+ priv->message, "");
+ }
+
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * npcm7xx_edac_check - Check controller for ECC errors
+ * @mci: Pointer to the edac memory controller instance
+ *
+ * This routine is used to check and post ECC errors and is called by
+ * this driver's CE and UE interrupt handler.
+ */
+static void npcm7xx_edac_check(struct mem_ctl_info *mci)
+{
+ struct npcm7xx_edac_priv *priv = mci->pvt_info;
+ int status = 0;
+
+ status = npcm7xx_edac_get_ecc_syndrom(priv->baseaddr, &priv->stat);
+ if (status != ECC_ERROR)
+ return;
+
+ npcm7xx_edac_handle_error(mci, &priv->stat);
+}
+
+/**
+ * npcm7xx_edac_isr - CE/UE interrupt service routine
+ * @irq: The virtual interrupt number being serviced.
+ * @dev_id: A pointer to the EDAC memory controller instance
+ * associated with the interrupt being handled.
+ *
+ * This routine implements the interrupt handler for both correctable
+ * (CE) and uncorrectable (UE) ECC errors for the Nuvoton Cadence DDR
+ * controller. It simply calls through to the routine used to check,
+ * report and clear the ECC status.
+ *
+ * Unconditionally returns IRQ_HANDLED.
+ */
+static irqreturn_t npcm7xx_edac_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ int npcm_edac_report = 0;
+
+ npcm_edac_report = edac_get_report_status();
+ if (npcm_edac_report != EDAC_REPORTING_DISABLED)
+ npcm7xx_edac_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static int npcm7xx_edac_register_irq(struct mem_ctl_info *mci,
+ struct platform_device *pdev)
+{
+ int status = 0;
+ int mc_irq;
+ struct npcm7xx_edac_priv *priv = mci->pvt_info;
+
+ /* Only enable MC interrupts with ECC - clear int_mask[6:3] */
+ writel(ECC_EN_INT_MASK, priv->baseaddr + 4*INT_MASK_ADDR);
+
+ mc_irq = platform_get_irq(pdev, 0);
+
+ if (mc_irq < 0) {
+ edac_printk(KERN_ERR, EDAC_MC, "Unable to map interrupts.\n");
+ status = -ENODEV;
+ goto fail;
+ }
+
+ status = devm_request_irq(&pdev->dev, mc_irq, npcm7xx_edac_isr, 0,
+ "npcm-memory-controller", mci);
+
+ if (status < 0) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Unable to request irq %d for ECC\n",
+ mc_irq);
+ status = -ENODEV;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return status;
+}
+
+static const struct of_device_id npcm7xx_edac_of_match[] = {
+ { .compatible = "nuvoton,npcm7xx-sdram-edac"},
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, npcm7xx_edac_of_match);
+
+/**
+ * npcm7xx_edac_mc_init - Initialize driver instance
+ * @mci: Pointer to the edac memory controller instance
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Performs initialization of the EDAC memory controller instance and
+ * related driver-private data associated with the memory controller the
+ * instance is bound to.
+ *
+ * Returns 0 if OK; otherwise, < 0 on error.
+ */
+static int npcm7xx_edac_mc_init(struct mem_ctl_info *mci,
+ struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ id = of_match_device(npcm7xx_edac_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+ mci->scrub_mode = SCRUB_HW_SRC;
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
+ mci->mod_name = EDAC_MOD_NAME;
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ return 0;
+}
+
+/**
+ * npcm7xx_edac_get_eccstate - Return the controller ecc enable/disable status
+ * @base: Pointer to the ddr memory controller base address
+ *
+ * Get the ECC enable/disable status for the controller
+ *
+ * Return: the ECC enable status as a boolean: true if enabled, false if disabled.
+ */
+static bool npcm7xx_edac_get_eccstate(void __iomem *base)
+{
+ u32 ecc_en;
+ bool state = false;
+
+ ecc_en = readl(base + 4*ECC_EN_ADDR);
+ if (ecc_en & ECC_ENABLE) {
+ edac_printk(KERN_INFO, EDAC_MC, "ECC reporting and correcting on.\n");
+ state = true;
+ }
+
+ return state;
+}
+
+/**
+ * npcm7xx_edac_mc_probe - Check controller and bind driver
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Probes a specific controller instance for binding with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int npcm7xx_edac_mc_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ struct npcm7xx_edac_priv *priv;
+ struct resource *res;
+ void __iomem *baseaddr;
+ int rc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(baseaddr)) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "DDR controller regs not defined\n");
+ return PTR_ERR(baseaddr);
+ }
+
+ /*
+ * Check if ECC is enabled.
+ * If not, there is no useful monitoring that can be done
+ * for this controller.
+ */
+ if (!npcm7xx_edac_get_eccstate(baseaddr)) {
+ edac_printk(KERN_INFO, EDAC_MC, "ECC disabled\n");
+ return -ENXIO;
+ }
+
+ /*
+ * Allocate an EDAC controller instance and perform the appropriate
+ * initialization.
+ */
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = 1;
+
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct npcm7xx_edac_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed memory allocation for mc instance\n");
+ return -ENOMEM;
+ }
+
+ mci->pdev = &pdev->dev;
+ priv = mci->pvt_info;
+ priv->baseaddr = baseaddr;
+ platform_set_drvdata(pdev, mci);
+
+ rc = npcm7xx_edac_mc_init(mci, pdev);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to initialize instance\n");
+ goto free_edac_mc;
+ }
+
+ /* Attempt to register it with the EDAC subsystem */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to register with EDAC core\n");
+ goto free_edac_mc;
+ }
+
+ /* Register interrupts */
+ rc = npcm7xx_edac_register_irq(mci, pdev);
+ if (rc)
+ goto free_edac_mc;
+
+ return 0;
+
+free_edac_mc:
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+/**
+ * npcm7xx_edac_mc_remove - Unbind driver from controller
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Return: Unconditionally 0
+ */
+static int npcm7xx_edac_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver npcm7xx_edac_driver = {
+ .probe = npcm7xx_edac_mc_probe,
+ .remove = npcm7xx_edac_mc_remove,
+ .driver = {
+ .name = EDAC_MOD_NAME,
+ .of_match_table = npcm7xx_edac_of_match,
+ },
+};
+
+module_platform_driver(npcm7xx_edac_driver);
+
+MODULE_AUTHOR("Quanta Computer Inc.");
+MODULE_DESCRIPTION("Nuvoton NPCM7xx EDAC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 8244da8a7241..4e60e84cd17a 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -50,6 +50,7 @@ static const int engine_page_size = 0x400;
#define FSI_SMODE 0x0 /* R/W: Mode register */
#define FSI_SISC 0x8 /* R/W: Interrupt condition */
#define FSI_SSTAT 0x14 /* R : Slave status */
+#define FSI_SLBUS 0x30 /* W : LBUS Ownership */
#define FSI_LLMODE 0x100 /* R/W: Link layer mode register */
/*
@@ -67,6 +68,11 @@ static const int engine_page_size = 0x400;
#define FSI_SMODE_LBCRR_MASK 0xf /* Clk ratio mask */
/*
+ * SLBUS fields
+ */
+#define FSI_SLBUS_FORCE 0x80000000 /* Force LBUS ownership */
+
+/*
* LLMODE fields
*/
#define FSI_LLMODE_ASYNC 0x1
@@ -981,7 +987,7 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
uint32_t cfam_id;
struct fsi_slave *slave;
uint8_t crc;
- __be32 data, llmode;
+ __be32 data, llmode, slbus;
int rc;
/* Currently, we only support single slaves on a link, and use the
@@ -1052,6 +1058,14 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
}
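+ /* Claim ownership of the slave's local bus before configuring it. */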
+ slbus = cpu_to_be32(FSI_SLBUS_FORCE);
+ rc = fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SLBUS,
+ &slbus, sizeof(slbus));
+ if (rc)
+ dev_warn(&master->dev,
+ "can't set slbus on slave:%02x:%02x %d\n", link, id,
+ rc);
+
rc = fsi_slave_set_smode(slave);
if (rc) {
dev_warn(&master->dev,
@@ -1154,10 +1168,18 @@ static int fsi_master_write(struct fsi_master *master, int link,
return rc;
}
+static int fsi_master_link_disable(struct fsi_master *master, int link)
+{
+ if (master->link_enable)
+ return master->link_enable(master, link, false);
+
+ return 0;
+}
+
static int fsi_master_link_enable(struct fsi_master *master, int link)
{
if (master->link_enable)
- return master->link_enable(master, link);
+ return master->link_enable(master, link, true);
return 0;
}
@@ -1192,12 +1214,15 @@ static int fsi_master_scan(struct fsi_master *master)
}
rc = fsi_master_break(master, link);
if (rc) {
+ fsi_master_link_disable(master, link);
dev_dbg(&master->dev,
"break to link %d failed: %d\n", link, rc);
continue;
}
- fsi_slave_init(master, link, 0);
+ rc = fsi_slave_init(master, link, 0);
+ if (rc)
+ fsi_master_link_disable(master, link);
}
return 0;
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index f49742b310c2..c006ec008a1a 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
+#include <linux/gpio/consumer.h>
#include "fsi-master.h"
@@ -21,6 +22,7 @@ struct fsi_master_aspeed {
struct device *dev;
void __iomem *base;
struct clk *clk;
+ struct gpio_desc *cfam_reset_gpio;
};
#define to_fsi_master_aspeed(m) \
@@ -82,7 +84,12 @@ static const u32 fsi_base = 0xa0000000;
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
-#define DEFAULT_DIVISOR 14
+/* Run the bus at maximum speed by default */
+#define FSI_DIVISOR_DEFAULT 1
+#define FSI_DIVISOR_CABLED 2
+static u16 aspeed_fsi_divisor = FSI_DIVISOR_DEFAULT;
+module_param_named(bus_div, aspeed_fsi_divisor, ushort, 0);
+
#define OPB_POLL_TIMEOUT 10000
static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
@@ -241,9 +248,10 @@ static int aspeed_master_read(struct fsi_master *master, int link,
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int ret;
- if (id != 0)
+ if (id > 0x3)
return -EINVAL;
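+ /* Encode the slave ID in address bits 22:21 (IDs 0-3). */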
+ addr |= id << 21;
addr += link * FSI_HUB_LINK_SIZE;
switch (size) {
@@ -273,9 +281,10 @@ static int aspeed_master_write(struct fsi_master *master, int link,
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int ret;
- if (id != 0)
+ if (id > 0x3)
return -EINVAL;
+ addr |= id << 21;
addr += link * FSI_HUB_LINK_SIZE;
switch (size) {
@@ -299,32 +308,28 @@ static int aspeed_master_write(struct fsi_master *master, int link,
return 0;
}
-static int aspeed_master_link_enable(struct fsi_master *master, int link)
+static int aspeed_master_link_enable(struct fsi_master *master, int link,
+ bool enable)
{
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int idx, bit, ret;
- __be32 reg, result;
+ __be32 reg;
idx = link / 32;
bit = link % 32;
reg = cpu_to_be32(0x80000000 >> bit);
+ if (!enable)
+ return opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx),
+ reg);
+
ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
if (ret)
return ret;
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
- ret = opb_readl(aspeed, ctrl_base + FSI_MENP0 + (4 * idx), &result);
- if (ret)
- return ret;
-
- if (result != reg) {
- dev_err(aspeed->dev, "%s failed: %08x\n", __func__, result);
- return -EIO;
- }
-
return 0;
}
@@ -386,9 +391,11 @@ static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
opb_writel(aspeed, ctrl_base + FSI_MECTRL, reg);
reg = cpu_to_be32(FSI_MMODE_ECRC | FSI_MMODE_EPC | FSI_MMODE_RELA
- | fsi_mmode_crs0(DEFAULT_DIVISOR)
- | fsi_mmode_crs1(DEFAULT_DIVISOR)
+ | fsi_mmode_crs0(aspeed_fsi_divisor)
+ | fsi_mmode_crs1(aspeed_fsi_divisor)
| FSI_MMODE_P8_TO_LSB);
+ dev_info(aspeed->dev, "mmode set to %08x (divisor %d)\n",
+ be32_to_cpu(reg), aspeed_fsi_divisor);
opb_writel(aspeed, ctrl_base + FSI_MMODE, reg);
reg = cpu_to_be32(0xffff0000);
@@ -419,6 +426,90 @@ static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
return 0;
}
+static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev);
+
+ gpiod_set_value(aspeed->cfam_reset_gpio, 1);
+ usleep_range(900, 1000);
+ gpiod_set_value(aspeed->cfam_reset_gpio, 0);
+
+ return count;
+}
+
+static DEVICE_ATTR(cfam_reset, 0200, NULL, cfam_reset_store);
+
+static int setup_cfam_reset(struct fsi_master_aspeed *aspeed)
+{
+ struct device *dev = aspeed->dev;
+ struct gpio_desc *gpio;
+ int rc;
+
+ gpio = devm_gpiod_get_optional(dev, "cfam-reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+ if (!gpio)
+ return 0;
+
+ aspeed->cfam_reset_gpio = gpio;
+
+ rc = device_create_file(dev, &dev_attr_cfam_reset);
+ if (rc) {
+ devm_gpiod_put(dev, gpio);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int tacoma_cabled_fsi_fixup(struct device *dev)
+{
+ struct gpio_desc *routing_gpio, *mux_gpio;
+ int gpio;
+
+ /*
+ * The routing GPIO is a jumper indicating we should mux for the
+ * externally connected FSI cable.
+ */
+ routing_gpio = devm_gpiod_get_optional(dev, "fsi-routing",
+ GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(routing_gpio))
+ return PTR_ERR(routing_gpio);
+ if (!routing_gpio)
+ return 0;
+
+ mux_gpio = devm_gpiod_get_optional(dev, "fsi-mux", GPIOD_ASIS);
+ if (IS_ERR(mux_gpio))
+ return PTR_ERR(mux_gpio);
+ if (!mux_gpio)
+ return 0;
+
+ gpio = gpiod_get_value(routing_gpio);
+ if (gpio < 0)
+ return gpio;
+
+ /* If the routing GPIO is high we should set the mux to low. */
+ if (gpio) {
+ /*
+ * Cable signal integrity means we should run the bus
+ * slightly slower. Do not override if a kernel param
+ * has already overridden.
+ */
+ if (aspeed_fsi_divisor == FSI_DIVISOR_DEFAULT)
+ aspeed_fsi_divisor = FSI_DIVISOR_CABLED;
+
+ gpiod_direction_output(mux_gpio, 0);
+ dev_info(dev, "FSI configured for external cable\n");
+ } else {
+ gpiod_direction_output(mux_gpio, 1);
+ }
+
+ devm_gpiod_put(dev, routing_gpio);
+
+ return 0;
+}
+
static int fsi_master_aspeed_probe(struct platform_device *pdev)
{
struct fsi_master_aspeed *aspeed;
@@ -426,6 +517,12 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
int rc, links, reg;
__be32 raw;
+ rc = tacoma_cabled_fsi_fixup(&pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Tacoma FSI cable fixup failed\n");
+ return rc;
+ }
+
aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
if (!aspeed)
return -ENOMEM;
@@ -448,6 +545,11 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
return rc;
}
+ rc = setup_cfam_reset(aspeed);
+ if (rc)
+ dev_err(&pdev->dev, "CFAM reset GPIO setup failed\n");
+
writel(0x1, aspeed->base + OPB_CLK_SYNC);
writel(OPB1_XFER_ACK_EN | OPB0_XFER_ACK_EN,
aspeed->base + OPB_IRQ_MASK);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 04d10ea8d343..62dcc71a30e6 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1039,7 +1039,8 @@ static void fsi_master_acf_setup_external(struct fsi_master_acf *master)
gpiod_direction_input(master->gpio_data);
}
-static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
+static int fsi_master_acf_link_enable(struct fsi_master *_master, int link,
+ bool enable)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
int rc = -EBUSY;
@@ -1049,7 +1050,7 @@ static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
mutex_lock(&master->lock);
if (!master->external_mode) {
- gpiod_set_value(master->gpio_enable, 1);
+ gpiod_set_value(master->gpio_enable, enable ? 1 : 0);
rc = 0;
}
mutex_unlock(&master->lock);
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index 4dcce17f243f..aa97c4a250cb 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -678,7 +678,8 @@ static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
gpiod_direction_input(master->gpio_data);
}
-static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
+static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link,
+ bool enable)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
int rc = -EBUSY;
@@ -688,7 +689,7 @@ static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
mutex_lock(&master->cmd_lock);
if (!master->external_mode) {
- gpiod_set_value(master->gpio_enable, 1);
+ gpiod_set_value(master->gpio_enable, enable ? 1 : 0);
rc = 0;
}
mutex_unlock(&master->cmd_lock);
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index def35cf92571..3caa2da7838c 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -77,7 +77,8 @@ static int hub_master_break(struct fsi_master *master, int link)
return hub_master_write(master, link, 0, addr, &cmd, sizeof(cmd));
}
-static int hub_master_link_enable(struct fsi_master *master, int link)
+static int hub_master_link_enable(struct fsi_master *master, int link,
+ bool enable)
{
struct fsi_master_hub *hub = to_fsi_master_hub(master);
int idx, bit;
@@ -89,13 +90,17 @@ static int hub_master_link_enable(struct fsi_master *master, int link)
reg = cpu_to_be32(0x80000000 >> bit);
+ if (!enable)
+ return fsi_device_write(hub->upstream, FSI_MCENP0 + (4 * idx),
+ &reg, 4);
+
rc = fsi_device_write(hub->upstream, FSI_MSENP0 + (4 * idx), &reg, 4);
+ if (rc)
+ return rc;
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
- fsi_device_read(hub->upstream, FSI_MENP0 + (4 * idx), &reg, 4);
-
- return rc;
+ return 0;
}
static void hub_master_release(struct device *dev)
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index 6e8d4d4d5149..cd6bee5e12a7 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -130,7 +130,8 @@ struct fsi_master {
uint32_t addr, const void *val, size_t size);
int (*term)(struct fsi_master *, int link, uint8_t id);
int (*send_break)(struct fsi_master *, int link);
- int (*link_enable)(struct fsi_master *, int link);
+ int (*link_enable)(struct fsi_master *, int link,
+ bool enable);
int (*link_config)(struct fsi_master *, int link,
u8 t_send_delay, u8 t_echo_delay);
};
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index 7da9c81759ac..942eff4032b0 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/fsi-occ.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -24,8 +25,13 @@
#define OCC_CMD_DATA_BYTES 4090
#define OCC_RESP_DATA_BYTES 4089
-#define OCC_SRAM_CMD_ADDR 0xFFFBE000
-#define OCC_SRAM_RSP_ADDR 0xFFFBF000
+#define OCC_P9_SRAM_CMD_ADDR 0xFFFBE000
+#define OCC_P9_SRAM_RSP_ADDR 0xFFFBF000
+
+#define OCC_P10_SRAM_CMD_ADDR 0xFFFFD000
+#define OCC_P10_SRAM_RSP_ADDR 0xFFFFE000
+
+#define OCC_P10_SRAM_MODE 0x58 /* Normal mode, OCB channel 2 */
/*
* Assume we don't have much FFDC, if we do we'll overflow and
@@ -37,11 +43,14 @@
#define OCC_TIMEOUT_MS 1000
#define OCC_CMD_IN_PRG_WAIT_MS 50
+enum versions { occ_p9, occ_p10 };
+
struct occ {
struct device *dev;
struct device *sbefifo;
char name[32];
int idx;
+ enum versions version;
struct miscdevice mdev;
struct mutex occ_lock;
};
@@ -235,29 +244,43 @@ static int occ_verify_checksum(struct occ_response *resp, u16 data_length)
return 0;
}
-static int occ_getsram(struct occ *occ, u32 address, void *data, ssize_t len)
+static int occ_getsram(struct occ *occ, u32 offset, void *data, ssize_t len)
{
u32 data_len = ((len + 7) / 8) * 8; /* must be multiples of 8 B */
- size_t resp_len, resp_data_len;
- __be32 *resp, cmd[5];
- int rc;
+ size_t cmd_len, resp_len, resp_data_len;
+ __be32 *resp, cmd[6];
+ int idx = 0, rc;
/*
* Magic sequence to do SBE getsram command. SBE will fetch data from
* specified SRAM address.
*/
- cmd[0] = cpu_to_be32(0x5);
+ switch (occ->version) {
+ default:
+ case occ_p9:
+ cmd_len = 5;
+ cmd[2] = cpu_to_be32(1); /* Normal mode */
+ cmd[3] = cpu_to_be32(OCC_P9_SRAM_RSP_ADDR + offset);
+ break;
+ case occ_p10:
+ idx = 1;
+ cmd_len = 6;
+ cmd[2] = cpu_to_be32(OCC_P10_SRAM_MODE);
+ cmd[3] = 0;
+ cmd[4] = cpu_to_be32(OCC_P10_SRAM_RSP_ADDR + offset);
+ break;
+ }
+
+ cmd[0] = cpu_to_be32(cmd_len);
cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_OCC_SRAM);
- cmd[2] = cpu_to_be32(1);
- cmd[3] = cpu_to_be32(address);
- cmd[4] = cpu_to_be32(data_len);
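+ /* idx accounts for the extra SRAM address word in the P10 chip-op. */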
+ cmd[4 + idx] = cpu_to_be32(data_len);
resp_len = (data_len >> 2) + OCC_SBE_STATUS_WORDS;
resp = kzalloc(resp_len << 2, GFP_KERNEL);
if (!resp)
return -ENOMEM;
- rc = sbefifo_submit(occ->sbefifo, cmd, 5, resp, &resp_len);
+ rc = sbefifo_submit(occ->sbefifo, cmd, cmd_len, resp, &resp_len);
if (rc)
goto free;
@@ -287,20 +310,21 @@ free:
return rc;
}
-static int occ_putsram(struct occ *occ, u32 address, const void *data,
- ssize_t len)
+static int occ_putsram(struct occ *occ, const void *data, ssize_t len)
{
size_t cmd_len, buf_len, resp_len, resp_data_len;
u32 data_len = ((len + 7) / 8) * 8; /* must be multiples of 8 B */
__be32 *buf;
- int rc;
+ int idx = 0, rc;
+
+ cmd_len = (occ->version == occ_p10) ? 6 : 5;
/*
* We use the same buffer for command and response, make
* sure it's big enough
*/
resp_len = OCC_SBE_STATUS_WORDS;
- cmd_len = (data_len >> 2) + 5;
+ cmd_len += data_len >> 2;
buf_len = max(cmd_len, resp_len);
buf = kzalloc(buf_len << 2, GFP_KERNEL);
if (!buf)
@@ -312,11 +336,23 @@ static int occ_putsram(struct occ *occ, u32 address, const void *data,
*/
buf[0] = cpu_to_be32(cmd_len);
buf[1] = cpu_to_be32(SBEFIFO_CMD_PUT_OCC_SRAM);
- buf[2] = cpu_to_be32(1);
- buf[3] = cpu_to_be32(address);
- buf[4] = cpu_to_be32(data_len);
- memcpy(&buf[5], data, len);
+ switch (occ->version) {
+ default:
+ case occ_p9:
+ buf[2] = cpu_to_be32(1); /* Normal mode */
+ buf[3] = cpu_to_be32(OCC_P9_SRAM_CMD_ADDR);
+ break;
+ case occ_p10:
+ idx = 1;
+ buf[2] = cpu_to_be32(OCC_P10_SRAM_MODE);
+ buf[3] = 0;
+ buf[4] = cpu_to_be32(OCC_P10_SRAM_CMD_ADDR);
+ break;
+ }
+
+ buf[4 + idx] = cpu_to_be32(data_len);
+ memcpy(&buf[5 + idx], data, len);
rc = sbefifo_submit(occ->sbefifo, buf, cmd_len, buf, &resp_len);
if (rc)
@@ -356,21 +392,35 @@ free:
static int occ_trigger_attn(struct occ *occ)
{
__be32 buf[OCC_SBE_STATUS_WORDS];
- size_t resp_len, resp_data_len;
- int rc;
+ size_t cmd_len, resp_len, resp_data_len;
+ int idx = 0, rc;
- BUILD_BUG_ON(OCC_SBE_STATUS_WORDS < 7);
+ BUILD_BUG_ON(OCC_SBE_STATUS_WORDS < 8);
resp_len = OCC_SBE_STATUS_WORDS;
- buf[0] = cpu_to_be32(0x5 + 0x2); /* Chip-op length in words */
+ switch (occ->version) {
+ default:
+ case occ_p9:
+ cmd_len = 7;
+ buf[2] = cpu_to_be32(3); /* Circular mode */
+ buf[3] = 0;
+ break;
+ case occ_p10:
+ idx = 1;
+ cmd_len = 8;
+ buf[2] = cpu_to_be32(0xd0); /* Circular mode, OCB Channel 1 */
+ buf[3] = 0;
+ buf[4] = 0;
+ break;
+ }
+
+ buf[0] = cpu_to_be32(cmd_len); /* Chip-op length in words */
buf[1] = cpu_to_be32(SBEFIFO_CMD_PUT_OCC_SRAM);
- buf[2] = cpu_to_be32(0x3); /* Mode: Circular */
- buf[3] = cpu_to_be32(0x0); /* Address: ignore in mode 3 */
- buf[4] = cpu_to_be32(0x8); /* Data length in bytes */
- buf[5] = cpu_to_be32(0x20010000); /* Trigger OCC attention */
- buf[6] = 0;
+ buf[4 + idx] = cpu_to_be32(8); /* Data length in bytes */
+ buf[5 + idx] = cpu_to_be32(0x20010000); /* Trigger OCC attention */
+ buf[6 + idx] = 0;
- rc = sbefifo_submit(occ->sbefifo, buf, 7, buf, &resp_len);
+ rc = sbefifo_submit(occ->sbefifo, buf, cmd_len, buf, &resp_len);
if (rc)
goto error;
@@ -429,7 +479,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
/* Extract the seq_no from the command (first byte) */
seq_no = *(const u8 *)request;
- rc = occ_putsram(occ, OCC_SRAM_CMD_ADDR, request, req_len);
+ rc = occ_putsram(occ, request, req_len);
if (rc)
goto done;
@@ -440,7 +490,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
/* Read occ response header */
start = jiffies;
do {
- rc = occ_getsram(occ, OCC_SRAM_RSP_ADDR, resp, 8);
+ rc = occ_getsram(occ, 0, resp, 8);
if (rc)
goto done;
@@ -476,8 +526,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
/* Grab the rest */
if (resp_data_length > 1) {
/* already got 3 bytes resp, also need 2 bytes checksum */
- rc = occ_getsram(occ, OCC_SRAM_RSP_ADDR + 8,
- &resp->data[3], resp_data_length - 1);
+ rc = occ_getsram(occ, 8, &resp->data[3], resp_data_length - 1);
if (rc)
goto done;
}
@@ -508,6 +557,7 @@ static int occ_probe(struct platform_device *pdev)
struct occ *occ;
struct platform_device *hwmon_dev;
struct device *dev = &pdev->dev;
+ const void *md = of_device_get_match_data(dev);
struct platform_device_info hwmon_dev_info = {
.parent = dev,
.name = "occ-hwmon",
@@ -517,6 +567,7 @@ static int occ_probe(struct platform_device *pdev)
if (!occ)
return -ENOMEM;
+ occ->version = (enum versions)md;
occ->dev = dev;
occ->sbefifo = dev->parent;
mutex_init(&occ->occ_lock);
@@ -575,7 +626,14 @@ static int occ_remove(struct platform_device *pdev)
}
static const struct of_device_id occ_match[] = {
- { .compatible = "ibm,p9-occ" },
+ {
+ .compatible = "ibm,p9-occ",
+ .data = (void *)occ_p9
+ },
+ {
+ .compatible = "ibm,p10-occ",
+ .data = (void *)occ_p10
+ },
{ },
};
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index f54df9ebc8b3..c8ccc99e214f 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -325,7 +325,8 @@ static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
static int sbefifo_request_reset(struct sbefifo *sbefifo)
{
struct device *dev = &sbefifo->fsi_dev->dev;
- u32 status, timeout;
+ unsigned long end_time;
+ u32 status;
int rc;
dev_dbg(dev, "Requesting FIFO reset\n");
@@ -341,7 +342,8 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
}
/* Wait for it to complete */
- for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
+ end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
+ while (!time_after(jiffies, end_time)) {
rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
if (rc) {
dev_err(dev, "Failed to read UP fifo status during reset"
@@ -355,7 +357,7 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
return 0;
}
- msleep(1);
+ cond_resched();
}
dev_err(dev, "FIFO reset timed out\n");
@@ -400,7 +402,7 @@ static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
/* The FIFO already contains a reset request from the SBE ? */
if (down_status & SBEFIFO_STS_RESET_REQ) {
dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
- rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
+ rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
if (rc) {
sbefifo->broken = true;
dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4c62f900bf7e..957faf080a82 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1388,6 +1388,34 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config SENSORS_PECI_CPUTEMP
+ tristate "PECI CPU temperature monitoring client"
+ depends on PECI
+ select MFD_INTEL_PECI_CLIENT
+ help
+ If you say yes here you get support for the generic Intel PECI
+ cputemp driver which provides Digital Thermal Sensor (DTS) thermal
+ readings of the CPU package and CPU cores that are accessible using
+ the PECI Client Command Suite via the processor PECI client.
+ Check <file:Documentation/hwmon/peci-cputemp.rst> for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-cputemp.
+
+config SENSORS_PECI_DIMMTEMP
+ tristate "PECI DIMM temperature monitoring client"
+ depends on PECI
+ select MFD_INTEL_PECI_CLIENT
+ help
+ If you say yes here you get support for the generic Intel PECI hwmon
+ driver which provides Digital Thermal Sensor (DTS) thermal readings of
+ DIMM components that are accessible using the PECI Client Command
+ Suite via the processor PECI client.
+ Check <file:Documentation/hwmon/peci-dimmtemp.rst> for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-dimmtemp.
+
source "drivers/hwmon/pmbus/Kconfig"
config SENSORS_PWM_FAN
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b0b9c8e57176..6f0061816830 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -148,6 +148,8 @@ obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
+obj-$(CONFIG_SENSORS_PECI_CPUTEMP) += peci-cputemp.o
+obj-$(CONFIG_SENSORS_PECI_DIMMTEMP) += peci-dimmtemp.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 30e18eb60da7..3e580a83ae61 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -41,6 +41,14 @@ struct temp_sensor_2 {
u8 value;
} __packed;
+struct temp_sensor_10 {
+ u32 sensor_id;
+ u8 fru_type;
+ u8 value;
+ u8 throttle;
+ u8 reserved;
+} __packed;
+
struct freq_sensor_1 {
u16 sensor_id;
u16 value;
@@ -307,6 +315,60 @@ static ssize_t occ_show_temp_2(struct device *dev,
return snprintf(buf, PAGE_SIZE - 1, "%u\n", val);
}
+static ssize_t occ_show_temp_10(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rc;
+ u32 val = 0;
+ struct temp_sensor_10 *temp;
+ struct occ *occ = dev_get_drvdata(dev);
+ struct occ_sensors *sensors = &occ->sensors;
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+
+ rc = occ_update_response(occ);
+ if (rc)
+ return rc;
+
+ temp = ((struct temp_sensor_10 *)sensors->temp.data) + sattr->index;
+
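+ /* sattr->nr selects: 0=sensor id, 1=input, 2=fru type, 3=fault, 4=throttle */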
+ switch (sattr->nr) {
+ case 0:
+ val = get_unaligned_be32(&temp->sensor_id);
+ break;
+ case 1:
+ val = temp->value;
+ if (val == OCC_TEMP_SENSOR_FAULT)
+ return -EREMOTEIO;
+
+ /*
+ * VRM doesn't return temperature, only alarm bit. This
+ * attribute maps to tempX_alarm instead of tempX_input for
+ * VRM
+ */
+ if (temp->fru_type != OCC_FRU_TYPE_VRM) {
+ /* sensor not ready */
+ if (val == 0)
+ return -EAGAIN;
+
+ val *= 1000;
+ }
+ break;
+ case 2:
+ val = temp->fru_type;
+ break;
+ case 3:
+ val = temp->value == OCC_TEMP_SENSOR_FAULT;
+ break;
+ case 4:
+ val = temp->throttle * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE - 1, "%u\n", val);
+}
+
static ssize_t occ_show_freq_1(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -745,6 +807,9 @@ static int occ_setup_sensor_attrs(struct occ *occ)
num_attrs += (sensors->temp.num_sensors * 4);
show_temp = occ_show_temp_2;
break;
+ case 0x10:
+ show_temp = occ_show_temp_10;
+ break;
default:
sensors->temp.num_sensors = 0;
}
diff --git a/drivers/hwmon/peci-cputemp.c b/drivers/hwmon/peci-cputemp.c
new file mode 100644
index 000000000000..b9fe91281d58
--- /dev/null
+++ b/drivers/hwmon/peci-cputemp.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "peci-hwmon.h"
+
+#define DEFAULT_CHANNEL_NUMS 5
+#define CORETEMP_CHANNEL_NUMS CORE_NUMS_MAX
+#define CPUTEMP_CHANNEL_NUMS (DEFAULT_CHANNEL_NUMS + CORETEMP_CHANNEL_NUMS)
+
+struct temp_group {
+ struct peci_sensor_data die;
+ struct peci_sensor_data dts;
+ struct peci_sensor_data tcontrol;
+ struct peci_sensor_data tthrottle;
+ struct peci_sensor_data tjmax;
+ struct peci_sensor_data core[CORETEMP_CHANNEL_NUMS];
+};
+
+struct peci_cputemp {
+ struct peci_client_manager *mgr;
+ struct device *dev;
+ char name[PECI_NAME_SIZE];
+ const struct cpu_gen_info *gen_info;
+ struct temp_group temp;
+ u64 core_mask;
+ u32 temp_config[CPUTEMP_CHANNEL_NUMS + 1];
+ uint config_idx;
+ struct hwmon_channel_info temp_info;
+ const struct hwmon_channel_info *info[2];
+ struct hwmon_chip_info chip;
+ char **coretemp_label;
+};
+
+enum cputemp_channels {
+ channel_die,
+ channel_dts,
+ channel_tcontrol,
+ channel_tthrottle,
+ channel_tjmax,
+ channel_core,
+};
+
+static const u32 config_table[DEFAULT_CHANNEL_NUMS + 1] = {
+ /* Die temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+
+ /* DTS margin */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+
+ /* Tcontrol temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_CRIT,
+
+ /* Tthrottle temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT,
+
+ /* Tjmax temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT,
+
+ /* Core temperature - for all core channels */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+};
+
+static const char *cputemp_label[DEFAULT_CHANNEL_NUMS] = {
+ "Die",
+ "DTS",
+ "Tcontrol",
+ "Tthrottle",
+ "Tjmax"
+};
+
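+/*
+ * Convert a 10.6 fixed-point temperature (10-bit signed integer part,
+ * 6-bit fraction) to millidegrees; the "(val ^ 0x8000) - 0x8000" pattern
+ * sign-extends the 16-bit value before scaling by 1000/64.
+ */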
+static s32 ten_dot_six_to_millidegree(s32 val)
+{
+ return ((val ^ 0x8000) - 0x8000) * 1000 / 64;
+}
+
+static int get_temp_targets(struct peci_cputemp *priv)
+{
+ s32 tthrottle_offset;
+ s32 tcontrol_margin;
+ u8 pkg_cfg[4];
+ int ret;
+
+ /*
+ * Use only the tcontrol update marker to determine whether the target
+ * values need updating.
+ */
+ if (!peci_sensor_need_update(&priv->temp.tcontrol))
+ return 0;
+
+ ret = peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_TEMP_TARGET, 0,
+ pkg_cfg);
+ if (ret)
+ return ret;
+
+ priv->temp.tjmax.value = pkg_cfg[2] * 1000;
+
+ tcontrol_margin = pkg_cfg[1];
+ tcontrol_margin = ((tcontrol_margin ^ 0x80) - 0x80) * 1000;
+ priv->temp.tcontrol.value = priv->temp.tjmax.value - tcontrol_margin;
+
+ tthrottle_offset = (pkg_cfg[3] & 0x2f) * 1000;
+ priv->temp.tthrottle.value = priv->temp.tjmax.value - tthrottle_offset;
+
+ peci_sensor_mark_updated(&priv->temp.tcontrol);
+
+ return 0;
+}
+
+static int get_die_temp(struct peci_cputemp *priv)
+{
+ struct peci_get_temp_msg msg;
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp.die))
+ return 0;
+
+ msg.addr = priv->mgr->client->addr;
+
+ ret = peci_command(priv->mgr->client->adapter, PECI_CMD_GET_TEMP, &msg);
+ if (ret)
+ return ret;
+
+ /* Note that tjmax must already be available at this point */
+ priv->temp.die.value = priv->temp.tjmax.value +
+ (msg.temp_raw * 1000 / 64);
+
+ peci_sensor_mark_updated(&priv->temp.die);
+
+ return 0;
+}
+
+static int get_dts(struct peci_cputemp *priv)
+{
+ s32 dts_margin;
+ u8 pkg_cfg[4];
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp.dts))
+ return 0;
+
+ ret = peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_DTS_MARGIN, 0,
+ pkg_cfg);
+
+ if (ret)
+ return ret;
+
+ dts_margin = le16_to_cpup((__le16 *)pkg_cfg);
+
+ /*
+ * Processors return a value of DTS reading in 10.6 format
+ * (10 bits signed decimal, 6 bits fractional).
+ * Error codes:
+ * 0x8000: General sensor error
+ * 0x8001: Reserved
+ * 0x8002: Underflow on reading value
+ * 0x8003-0x81ff: Reserved
+ */
+ if (dts_margin >= 0x8000 && dts_margin <= 0x81ff)
+ return -EIO;
+
+ dts_margin = ten_dot_six_to_millidegree(dts_margin);
+
+ /* Note that tcontrol must already be available at this point */
+ priv->temp.dts.value = priv->temp.tcontrol.value - dts_margin;
+
+ peci_sensor_mark_updated(&priv->temp.dts);
+
+ return 0;
+}
+
+static int get_core_temp(struct peci_cputemp *priv, int core_index)
+{
+ s32 core_dts_margin;
+ u8 pkg_cfg[4];
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp.core[core_index]))
+ return 0;
+
+ ret = peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_PER_CORE_DTS_TEMP,
+ core_index, pkg_cfg);
+ if (ret)
+ return ret;
+
+ core_dts_margin = le16_to_cpup((__le16 *)pkg_cfg);
+
+ /*
+ * Processors return a value of the core DTS reading in 10.6 format
+ * (10 bits signed decimal, 6 bits fractional).
+ * Error codes:
+ * 0x8000: General sensor error
+ * 0x8001: Reserved
+ * 0x8002: Underflow on reading value
+ * 0x8003-0x81ff: Reserved
+ */
+ if (core_dts_margin >= 0x8000 && core_dts_margin <= 0x81ff)
+ return -EIO;
+
+ core_dts_margin = ten_dot_six_to_millidegree(core_dts_margin);
+
+ /* Note that tjmax must already be available at this point */
+ priv->temp.core[core_index].value = priv->temp.tjmax.value +
+ core_dts_margin;
+
+ peci_sensor_mark_updated(&priv->temp.core[core_index]);
+
+ return 0;
+}
+
+static int cputemp_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct peci_cputemp *priv = dev_get_drvdata(dev);
+
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = (channel < DEFAULT_CHANNEL_NUMS) ?
+ cputemp_label[channel] :
+ (const char *)priv->coretemp_label[channel -
+ DEFAULT_CHANNEL_NUMS];
+
+ return 0;
+}
+
+static int cputemp_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_cputemp *priv = dev_get_drvdata(dev);
+ int ret, core_index;
+
+ if (channel >= CPUTEMP_CHANNEL_NUMS ||
+ !(priv->temp_config[channel] & BIT(attr)))
+ return -EOPNOTSUPP;
+
+ ret = get_temp_targets(priv);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case channel_die:
+ ret = get_die_temp(priv);
+ if (ret)
+ break;
+
+ *val = priv->temp.die.value;
+ break;
+ case channel_dts:
+ ret = get_dts(priv);
+ if (ret)
+ break;
+
+ *val = priv->temp.dts.value;
+ break;
+ case channel_tcontrol:
+ *val = priv->temp.tcontrol.value;
+ break;
+ case channel_tthrottle:
+ *val = priv->temp.tthrottle.value;
+ break;
+ case channel_tjmax:
+ *val = priv->temp.tjmax.value;
+ break;
+ default:
+ core_index = channel - DEFAULT_CHANNEL_NUMS;
+ ret = get_core_temp(priv, core_index);
+ if (ret)
+ break;
+
+ *val = priv->temp.core[core_index].value;
+ break;
+ }
+ break;
+ case hwmon_temp_max:
+ *val = priv->temp.tcontrol.value;
+ break;
+ case hwmon_temp_crit:
+ *val = priv->temp.tjmax.value;
+ break;
+ case hwmon_temp_crit_hyst:
+ *val = priv->temp.tjmax.value - priv->temp.tcontrol.value;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static umode_t cputemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct peci_cputemp *priv = data;
+
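+ /* Core channels are visible only when their bit is set in core_mask. */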
+ if (channel < ARRAY_SIZE(priv->temp_config) &&
+ (priv->temp_config[channel] & BIT(attr)) &&
+ (channel < DEFAULT_CHANNEL_NUMS ||
+ (channel >= DEFAULT_CHANNEL_NUMS &&
+ (priv->core_mask & BIT(channel - DEFAULT_CHANNEL_NUMS)))))
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops cputemp_ops = {
+ .is_visible = cputemp_is_visible,
+ .read_string = cputemp_read_string,
+ .read = cputemp_read,
+};
+
+static int check_resolved_cores(struct peci_cputemp *priv)
+{
+ struct peci_rd_pci_cfg_local_msg msg;
+ int ret;
+
+ /* Get the RESOLVED_CORES register value */
+ msg.addr = priv->mgr->client->addr;
+ msg.device = 30;
+ msg.function = 3;
+ msg.rx_len = 4;
+ msg.bus = 1;
+ msg.reg = 0xb4;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, &msg);
+ if (msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ priv->core_mask = le32_to_cpup((__le32 *)msg.pci_config);
+ if (!priv->core_mask)
+ return -EAGAIN;
+
+ dev_dbg(priv->dev, "Scanned resolved cores: 0x%llx\n", priv->core_mask);
+
+ return 0;
+}
+
+static int create_core_temp_label(struct peci_cputemp *priv, int idx)
+{
+ priv->coretemp_label[idx] = devm_kzalloc(priv->dev,
+ PECI_HWMON_LABEL_STR_LEN,
+ GFP_KERNEL);
+ if (!priv->coretemp_label[idx])
+ return -ENOMEM;
+
+ sprintf(priv->coretemp_label[idx], "Core %d", idx + 1);
+
+ return 0;
+}
+
+static int create_core_temp_info(struct peci_cputemp *priv)
+{
+ int ret, i;
+
+ ret = check_resolved_cores(priv);
+ if (ret)
+ return ret;
+
+ priv->coretemp_label = devm_kzalloc(priv->dev,
+ priv->gen_info->core_max *
+ sizeof(char *),
+ GFP_KERNEL);
+ if (!priv->coretemp_label)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->gen_info->core_max; i++)
+ if (priv->core_mask & BIT(i)) {
+ while (priv->config_idx <= i + DEFAULT_CHANNEL_NUMS)
+ priv->temp_config[priv->config_idx++] =
+ config_table[channel_core];
+
+ ret = create_core_temp_label(priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int peci_cputemp_probe(struct platform_device *pdev)
+{
+ struct peci_client_manager *mgr = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct peci_cputemp *priv;
+ struct device *hwmon_dev;
+ int ret;
+
+ if ((mgr->client->adapter->cmd_mask &
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG))) !=
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG)))
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->mgr = mgr;
+ priv->dev = dev;
+ priv->gen_info = mgr->gen_info;
+
+ snprintf(priv->name, PECI_NAME_SIZE, "peci_cputemp.cpu%d",
+ mgr->client->addr - PECI_BASE_ADDR);
+
+ priv->temp_config[priv->config_idx++] = config_table[channel_die];
+ priv->temp_config[priv->config_idx++] = config_table[channel_dts];
+ priv->temp_config[priv->config_idx++] = config_table[channel_tcontrol];
+ priv->temp_config[priv->config_idx++] = config_table[channel_tthrottle];
+ priv->temp_config[priv->config_idx++] = config_table[channel_tjmax];
+
+ ret = create_core_temp_info(priv);
+ if (ret)
+ dev_dbg(dev, "Skipped creating core temp info\n");
+
+ priv->chip.ops = &cputemp_ops;
+ priv->chip.info = priv->info;
+
+ priv->info[0] = &priv->temp_info;
+
+ priv->temp_info.type = hwmon_temp;
+ priv->temp_info.config = priv->temp_config;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(priv->dev,
+ priv->name,
+ priv,
+ &priv->chip,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), priv->name);
+
+ return 0;
+}
+
+static const struct platform_device_id peci_cputemp_ids[] = {
+ { .name = "peci-cputemp", .driver_data = 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, peci_cputemp_ids);
+
+static struct platform_driver peci_cputemp_driver = {
+ .probe = peci_cputemp_probe,
+ .id_table = peci_cputemp_ids,
+ .driver = { .name = KBUILD_MODNAME, },
+};
+module_platform_driver(peci_cputemp_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI cputemp driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/peci-dimmtemp.c b/drivers/hwmon/peci-dimmtemp.c
new file mode 100644
index 000000000000..1555bfdefabd
--- /dev/null
+++ b/drivers/hwmon/peci-dimmtemp.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include "peci-hwmon.h"
+
+#define DIMM_MASK_CHECK_DELAY_JIFFIES msecs_to_jiffies(5000)
+#define DIMM_MASK_CHECK_RETRY_MAX 60 /* 60 x 5 secs = 5 minutes */
+
+struct peci_dimmtemp {
+ struct peci_client_manager *mgr;
+ struct device *dev;
+ char name[PECI_NAME_SIZE];
+ const struct cpu_gen_info *gen_info;
+ struct workqueue_struct *work_queue;
+ struct delayed_work work_handler;
+ struct peci_sensor_data temp[DIMM_NUMS_MAX];
+ long temp_max[DIMM_NUMS_MAX];
+ long temp_crit[DIMM_NUMS_MAX];
+ u32 dimm_mask;
+ int retry_count;
+ u32 temp_config[DIMM_NUMS_MAX + 1];
+ struct hwmon_channel_info temp_info;
+ const struct hwmon_channel_info *info[2];
+ struct hwmon_chip_info chip;
+ char **dimmtemp_label;
+};
+
+static const u8 support_model[4] = {
+ INTEL_FAM6_HASWELL_X,
+ INTEL_FAM6_BROADWELL_X,
+ INTEL_FAM6_SKYLAKE_X,
+ INTEL_FAM6_SKYLAKE_XD,
+};
+
+static inline int read_ddr_dimm_temp_config(struct peci_dimmtemp *priv,
+ int chan_rank,
+ u8 *cfg_data)
+{
+ return peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_DDR_DIMM_TEMP,
+ chan_rank, cfg_data);
+}
+
+static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no)
+{
+ int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
+ int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
+ struct peci_rd_pci_cfg_local_msg rp_msg;
+ u8 cfg_data[4];
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp[dimm_no]))
+ return 0;
+
+ ret = read_ddr_dimm_temp_config(priv, chan_rank, cfg_data);
+ if (ret)
+ return ret;
+
+ priv->temp[dimm_no].value = cfg_data[dimm_order] * 1000;
+
+ switch (priv->gen_info->model) {
+ case INTEL_FAM6_SKYLAKE_X:
+ rp_msg.addr = priv->mgr->client->addr;
+ rp_msg.bus = 2;
+ /*
+ * Device 10, Function 2: IMC 0 channel 0 -> rank 0
+ * Device 10, Function 6: IMC 0 channel 1 -> rank 1
+ * Device 11, Function 2: IMC 0 channel 2 -> rank 2
+ * Device 12, Function 2: IMC 1 channel 0 -> rank 3
+ * Device 12, Function 6: IMC 1 channel 1 -> rank 4
+ * Device 13, Function 2: IMC 1 channel 2 -> rank 5
+ */
+ rp_msg.device = 10 + chan_rank / 3 * 2 +
+ (chan_rank % 3 == 2 ? 1 : 0);
+ rp_msg.function = chan_rank % 3 == 1 ? 6 : 2;
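+		/*
+		 * e.g. chan_rank 4 -> device 12 (10 + 4 / 3 * 2), function 6,
+		 * i.e. IMC 1 channel 1 in the mapping above.
+		 */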
+ rp_msg.reg = 0x120 + dimm_order * 4;
+ rp_msg.rx_len = 4;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, &rp_msg);
+ if (rp_msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ priv->temp_max[dimm_no] = rp_msg.pci_config[1] * 1000;
+ priv->temp_crit[dimm_no] = rp_msg.pci_config[2] * 1000;
+ break;
+ case INTEL_FAM6_SKYLAKE_XD:
+ rp_msg.addr = priv->mgr->client->addr;
+ rp_msg.bus = 2;
+ /*
+ * Device 10, Function 2: IMC 0 channel 0 -> rank 0
+ * Device 10, Function 6: IMC 0 channel 1 -> rank 1
+ * Device 12, Function 2: IMC 1 channel 0 -> rank 2
+ * Device 12, Function 6: IMC 1 channel 1 -> rank 3
+ */
+ rp_msg.device = 10 + chan_rank / 2 * 2;
+ rp_msg.function = (chan_rank % 2) ? 6 : 2;
+ rp_msg.reg = 0x120 + dimm_order * 4;
+ rp_msg.rx_len = 4;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, &rp_msg);
+ if (rp_msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ priv->temp_max[dimm_no] = rp_msg.pci_config[1] * 1000;
+ priv->temp_crit[dimm_no] = rp_msg.pci_config[2] * 1000;
+ break;
+ case INTEL_FAM6_HASWELL_X:
+ case INTEL_FAM6_BROADWELL_X:
+ rp_msg.addr = priv->mgr->client->addr;
+ rp_msg.bus = 1;
+ /*
+ * Device 20, Function 0: IMC 0 channel 0 -> rank 0
+ * Device 20, Function 1: IMC 0 channel 1 -> rank 1
+ * Device 21, Function 0: IMC 0 channel 2 -> rank 2
+ * Device 21, Function 1: IMC 0 channel 3 -> rank 3
+ * Device 23, Function 0: IMC 1 channel 0 -> rank 4
+ * Device 23, Function 1: IMC 1 channel 1 -> rank 5
+ * Device 24, Function 0: IMC 1 channel 2 -> rank 6
+ * Device 24, Function 1: IMC 1 channel 3 -> rank 7
+ */
+ rp_msg.device = 20 + chan_rank / 2 + chan_rank / 4;
+ rp_msg.function = chan_rank % 2;
+ rp_msg.reg = 0x120 + dimm_order * 4;
+ rp_msg.rx_len = 4;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, &rp_msg);
+ if (rp_msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ priv->temp_max[dimm_no] = rp_msg.pci_config[1] * 1000;
+ priv->temp_crit[dimm_no] = rp_msg.pci_config[2] * 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ peci_sensor_mark_updated(&priv->temp[dimm_no]);
+
+ return 0;
+}
+
+static int dimmtemp_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(dev);
+
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = (const char *)priv->dimmtemp_label[channel];
+
+ return 0;
+}
+
+static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = get_dimm_temp(priv, channel);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = priv->temp[channel].value;
+ break;
+ case hwmon_temp_max:
+ *val = priv->temp_max[channel];
+ break;
+ case hwmon_temp_crit:
+ *val = priv->temp_crit[channel];
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static umode_t dimmtemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct peci_dimmtemp *priv = data;
+
+ if (priv->temp_config[channel] & BIT(attr) &&
+ priv->dimm_mask & BIT(channel))
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops dimmtemp_ops = {
+ .is_visible = dimmtemp_is_visible,
+ .read_string = dimmtemp_read_string,
+ .read = dimmtemp_read,
+};
+
+static int check_populated_dimms(struct peci_dimmtemp *priv)
+{
+ u32 chan_rank_max = priv->gen_info->chan_rank_max;
+ u32 dimm_idx_max = priv->gen_info->dimm_idx_max;
+ int chan_rank, dimm_idx;
+ u8 cfg_data[4];
+
+ for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
+ int ret;
+
+ ret = read_ddr_dimm_temp_config(priv, chan_rank, cfg_data);
+ if (ret) {
+ priv->dimm_mask = 0;
+ return ret;
+ }
+
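+		/*
+		 * Each populated DIMM sets one bit: channel rank N, DIMM
+		 * index M maps to bit (N * dimm_idx_max + M).
+		 */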
+ for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++)
+ if (cfg_data[dimm_idx])
+ priv->dimm_mask |= BIT(chan_rank *
+ dimm_idx_max +
+ dimm_idx);
+ }
+
+ if (!priv->dimm_mask)
+ return -EAGAIN;
+
+ dev_dbg(priv->dev, "Scanned populated DIMMs: 0x%x\n", priv->dimm_mask);
+
+ return 0;
+}
+
+static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan)
+{
+ int rank, idx;
+
+ priv->dimmtemp_label[chan] = devm_kzalloc(priv->dev,
+ PECI_HWMON_LABEL_STR_LEN,
+ GFP_KERNEL);
+ if (!priv->dimmtemp_label[chan])
+ return -ENOMEM;
+
+ rank = chan / priv->gen_info->dimm_idx_max;
+ idx = chan % priv->gen_info->dimm_idx_max;
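+	/* e.g. with dimm_idx_max of 2, chan 3 -> rank 1, idx 1 -> "DIMM B2" */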
+
+ sprintf(priv->dimmtemp_label[chan], "DIMM %c%d", 'A' + rank, idx + 1);
+
+ return 0;
+}
+
+static int create_dimm_temp_info(struct peci_dimmtemp *priv)
+{
+ int ret, i, config_idx, channels;
+ struct device *dev;
+
+ ret = check_populated_dimms(priv);
+ if (ret) {
+ if (ret == -EAGAIN) {
+ if (priv->retry_count < DIMM_MASK_CHECK_RETRY_MAX) {
+ queue_delayed_work(priv->work_queue,
+ &priv->work_handler,
+ DIMM_MASK_CHECK_DELAY_JIFFIES);
+ priv->retry_count++;
+ dev_dbg(priv->dev,
+ "Deferred DIMM temp info creation\n");
+ } else {
+ dev_err(priv->dev,
+ "Timeout DIMM temp info creation\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+ }
+
+ channels = priv->gen_info->chan_rank_max *
+ priv->gen_info->dimm_idx_max;
+
+ priv->dimmtemp_label = devm_kzalloc(priv->dev,
+ channels * sizeof(char *),
+ GFP_KERNEL);
+ if (!priv->dimmtemp_label)
+ return -ENOMEM;
+
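+	/*
+	 * Fill temp_config[] for every channel up to and including the
+	 * highest populated DIMM; dimmtemp_is_visible() hides channels
+	 * whose dimm_mask bit is clear.
+	 */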
+ for (i = 0, config_idx = 0; i < channels; i++)
+ if (priv->dimm_mask & BIT(i)) {
+ while (i >= config_idx)
+ priv->temp_config[config_idx++] =
+ HWMON_T_LABEL | HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_CRIT;
+
+ ret = create_dimm_temp_label(priv, i);
+ if (ret)
+ return ret;
+ }
+
+ priv->chip.ops = &dimmtemp_ops;
+ priv->chip.info = priv->info;
+
+ priv->info[0] = &priv->temp_info;
+
+ priv->temp_info.type = hwmon_temp;
+ priv->temp_info.config = priv->temp_config;
+
+ dev = devm_hwmon_device_register_with_info(priv->dev,
+ priv->name,
+ priv,
+ &priv->chip,
+ NULL);
+ if (IS_ERR(dev)) {
+ dev_err(priv->dev, "Failed to register hwmon device\n");
+ return PTR_ERR(dev);
+ }
+
+ dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name);
+
+ return 0;
+}
+
+static void create_dimm_temp_info_delayed(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct peci_dimmtemp *priv = container_of(dwork, struct peci_dimmtemp,
+ work_handler);
+ int ret;
+
+ ret = create_dimm_temp_info(priv);
+ if (ret && ret != -EAGAIN)
+ dev_dbg(priv->dev, "Failed to create DIMM temp info\n");
+}
+
+static int peci_dimmtemp_probe(struct platform_device *pdev)
+{
+ struct peci_client_manager *mgr = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct peci_dimmtemp *priv;
+ int ret, i;
+
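+	/* the adapter must support both GetTemp and RdPkgConfig commands */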
+ if ((mgr->client->adapter->cmd_mask &
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG))) !=
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG)))
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(support_model); i++) {
+ if (mgr->gen_info->model == support_model[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(support_model))
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->mgr = mgr;
+ priv->dev = dev;
+ priv->gen_info = mgr->gen_info;
+
+ snprintf(priv->name, PECI_NAME_SIZE, "peci_dimmtemp.cpu%d",
+ priv->mgr->client->addr - PECI_BASE_ADDR);
+
+ priv->work_queue = alloc_ordered_workqueue(priv->name, 0);
+ if (!priv->work_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&priv->work_handler, create_dimm_temp_info_delayed);
+
+ ret = create_dimm_temp_info(priv);
+ if (ret && ret != -EAGAIN) {
+ dev_dbg(dev, "Failed to create DIMM temp info\n");
+ goto err_free_wq;
+ }
+
+ return 0;
+
+err_free_wq:
+ destroy_workqueue(priv->work_queue);
+ return ret;
+}
+
+static int peci_dimmtemp_remove(struct platform_device *pdev)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(&pdev->dev);
+
+ cancel_delayed_work_sync(&priv->work_handler);
+ destroy_workqueue(priv->work_queue);
+
+ return 0;
+}
+
+static const struct platform_device_id peci_dimmtemp_ids[] = {
+ { .name = "peci-dimmtemp", .driver_data = 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, peci_dimmtemp_ids);
+
+static struct platform_driver peci_dimmtemp_driver = {
+ .probe = peci_dimmtemp_probe,
+ .remove = peci_dimmtemp_remove,
+ .id_table = peci_dimmtemp_ids,
+ .driver = { .name = KBUILD_MODNAME, },
+};
+module_platform_driver(peci_dimmtemp_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI dimmtemp driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/peci-hwmon.h b/drivers/hwmon/peci-hwmon.h
new file mode 100644
index 000000000000..4d78c528c4c8
--- /dev/null
+++ b/drivers/hwmon/peci-hwmon.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2019 Intel Corporation */
+
+#ifndef __PECI_HWMON_H
+#define __PECI_HWMON_H
+
+#include <linux/peci.h>
+
+#define TEMP_TYPE_PECI 6 /* Sensor type 6: Intel PECI */
+#define UPDATE_INTERVAL HZ
+
+#define PECI_HWMON_LABEL_STR_LEN 10
+
+/**
+ * struct peci_sensor_data - PECI sensor information
+ * @valid: flag to indicate the sensor value is valid
+ * @value: sensor value in millidegree Celsius
+ * @last_updated: time of the last update in jiffies
+ */
+struct peci_sensor_data {
+ uint valid;
+ s32 value;
+ ulong last_updated;
+};
+
+/**
+ * peci_sensor_need_update - check whether sensor update is needed or not
+ * @sensor: pointer to sensor data struct
+ *
+ * Return: true if update is needed, false if not.
+ */
+static inline bool peci_sensor_need_update(struct peci_sensor_data *sensor)
+{
+ return !sensor->valid ||
+ time_after(jiffies, sensor->last_updated + UPDATE_INTERVAL);
+}
+
+/**
+ * peci_sensor_mark_updated - mark the sensor is updated
+ * @sensor: pointer to sensor data struct
+ */
+static inline void peci_sensor_mark_updated(struct peci_sensor_data *sensor)
+{
+ sensor->valid = 1;
+ sensor->last_updated = jiffies;
+}
+
+#endif /* __PECI_HWMON_H */
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index d9aa5c873d21..cbcd0b2301f4 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -12,40 +12,126 @@
enum max31785_regs {
MFR_REVISION = 0x9b,
+ MFR_FAULT_RESPONSE = 0xd9,
+ MFR_TEMP_SENSOR_CONFIG = 0xf0,
MFR_FAN_CONFIG = 0xf1,
+ MFR_FAN_FAULT_LIMIT = 0xf5,
};
#define MAX31785 0x3030
#define MAX31785A 0x3040
#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
+#define MFR_FAN_CONFIG_TSFO BIT(9)
+#define MFR_FAN_CONFIG_TACHO BIT(8)
+#define MFR_FAN_CONFIG_HEALTH BIT(4)
+#define MFR_FAN_CONFIG_ROTOR_HI_LO BIT(3)
+#define MFR_FAN_CONFIG_ROTOR BIT(2)
+
+#define MFR_FAULT_RESPONSE_MONITOR BIT(0)
#define MAX31785_NR_PAGES 23
#define MAX31785_NR_FAN_PAGES 6
+/*
+ * MAX31785 dragons ahead
+ *
+ * We see weird issues where some transfers fail. There doesn't appear to be
+ * any pattern to the problem, so below we wrap all the read/write calls with a
+ * retry. The device provides no indication of this besides NACK'ing master
+ * Txs; no bits are set in STATUS_BYTE to suggest anything has gone wrong.
+ */
+
+#define max31785_retry(_func, ...) ({ \
+ /* All relevant functions return int, sue me */ \
+ int _ret = _func(__VA_ARGS__); \
+ if (_ret == -EIO) \
+ _ret = _func(__VA_ARGS__); \
+ _ret; \
+})
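+/* The retry is deliberately one-shot: an -EIO transfer is reissued once. */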
+
+static int max31785_i2c_smbus_read_byte_data(struct i2c_client *client,
+ int command)
+{
+ return max31785_retry(i2c_smbus_read_byte_data, client, command);
+}
+
+static int max31785_i2c_smbus_write_byte_data(struct i2c_client *client,
+ int command, u16 data)
+{
+ return max31785_retry(i2c_smbus_write_byte_data, client, command, data);
+}
+
+static int max31785_i2c_smbus_read_word_data(struct i2c_client *client,
+ int command)
+{
+ return max31785_retry(i2c_smbus_read_word_data, client, command);
+}
+
+static int max31785_i2c_smbus_write_word_data(struct i2c_client *client,
+ int command, u16 data)
+{
+ return max31785_retry(i2c_smbus_write_word_data, client, command, data);
+}
+
+static int max31785_pmbus_write_byte(struct i2c_client *client, int page,
+ u8 value)
+{
+ return max31785_retry(pmbus_write_byte, client, page, value);
+}
+
+static int max31785_pmbus_read_byte_data(struct i2c_client *client, int page,
+ int command)
+{
+ return max31785_retry(pmbus_read_byte_data, client, page, command);
+}
+
+static int max31785_pmbus_write_byte_data(struct i2c_client *client, int page,
+ int command, u16 data)
+{
+ return max31785_retry(pmbus_write_byte_data, client, page, command,
+ data);
+}
+
+static int max31785_pmbus_read_word_data(struct i2c_client *client, int page,
+ int phase, int command)
+{
+ return max31785_retry(pmbus_read_word_data, client, page, phase, command);
+}
+
+static int max31785_pmbus_write_word_data(struct i2c_client *client, int page,
+ int command, u16 data)
+{
+ return max31785_retry(pmbus_write_word_data, client, page, command,
+ data);
+}
+
static int max31785_read_byte_data(struct i2c_client *client, int page,
int reg)
{
- if (page < MAX31785_NR_PAGES)
- return -ENODATA;
-
switch (reg) {
case PMBUS_VOUT_MODE:
- return -ENOTSUPP;
+ if (page >= MAX31785_NR_PAGES)
+ return -ENOTSUPP;
+ break;
case PMBUS_FAN_CONFIG_12:
- return pmbus_read_byte_data(client, page - MAX31785_NR_PAGES,
- reg);
+ if (page >= MAX31785_NR_PAGES)
+ return max31785_pmbus_read_byte_data(client,
+ page - MAX31785_NR_PAGES,
+ reg);
+ break;
}
- return -ENODATA;
+ return max31785_pmbus_read_byte_data(client, page, reg);
}
static int max31785_write_byte(struct i2c_client *client, int page, u8 value)
{
- if (page < MAX31785_NR_PAGES)
- return -ENODATA;
+ if (page >= MAX31785_NR_PAGES)
+ return -ENOTSUPP;
- return -ENOTSUPP;
+ return max31785_pmbus_write_byte(client, page, value);
}
static int max31785_read_long_data(struct i2c_client *client, int page,
@@ -106,11 +192,13 @@ static int max31785_get_pwm_mode(struct i2c_client *client, int page)
int config;
int command;
- config = pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
+ config = max31785_pmbus_read_byte_data(client, page,
+ PMBUS_FAN_CONFIG_12);
if (config < 0)
return config;
- command = pmbus_read_word_data(client, page, 0xff, PMBUS_FAN_COMMAND_1);
+ command = max31785_pmbus_read_word_data(client, page, 0xff,
+ PMBUS_FAN_COMMAND_1);
if (command < 0)
return command;
@@ -134,15 +222,14 @@ static int max31785_read_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_READ_FAN_SPEED_1:
if (page < MAX31785_NR_PAGES)
- return -ENODATA;
+ return max31785_pmbus_read_word_data(client, page, 0xff, reg);
rv = max31785_read_long_data(client, page - MAX31785_NR_PAGES,
reg, &val);
if (rv < 0)
return rv;
- rv = (val >> 16) & 0xffff;
- break;
+ return (val >> 16) & 0xffff;
case PMBUS_FAN_COMMAND_1:
/*
* PMBUS_FAN_COMMAND_x is probed to judge whether or not to
@@ -150,20 +237,28 @@ static int max31785_read_word_data(struct i2c_client *client, int page,
*
* Don't expose fan_target attribute for virtual pages.
*/
- rv = (page >= MAX31785_NR_PAGES) ? -ENOTSUPP : -ENODATA;
+ if (page >= MAX31785_NR_PAGES)
+ return -ENOTSUPP;
break;
+ case PMBUS_VIRT_FAN_TARGET_1:
+ if (page >= MAX31785_NR_PAGES)
+ return -ENOTSUPP;
+
+ return -ENODATA;
case PMBUS_VIRT_PWM_1:
- rv = max31785_get_pwm(client, page);
- break;
+ return max31785_get_pwm(client, page);
case PMBUS_VIRT_PWM_ENABLE_1:
- rv = max31785_get_pwm_mode(client, page);
- break;
+ return max31785_get_pwm_mode(client, page);
default:
- rv = -ENODATA;
+ if (page >= MAX31785_NR_PAGES)
+ return -ENXIO;
break;
}
- return rv;
+ if (reg >= PMBUS_VIRT_BASE)
+ return -ENXIO;
+
+ return max31785_pmbus_read_word_data(client, page, 0xff, reg);
}
static inline u32 max31785_scale_pwm(u32 sensor_val)
@@ -187,6 +282,31 @@ static inline u32 max31785_scale_pwm(u32 sensor_val)
return (sensor_val * 100) / 255;
}
+static int max31785_update_fan(struct i2c_client *client, int page,
+ u8 config, u8 mask, u16 command)
+{
+ int from, rv;
+ u8 to;
+
+ from = max31785_pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
+ if (from < 0)
+ return from;
+
+ to = (from & ~mask) | (config & mask);
+
+ if (to != from) {
+ rv = max31785_pmbus_write_byte_data(client, page,
+ PMBUS_FAN_CONFIG_12, to);
+ if (rv < 0)
+ return rv;
+ }
+
+ rv = max31785_pmbus_write_word_data(client, page, PMBUS_FAN_COMMAND_1,
+ command);
+
+ return rv;
+}
+
static int max31785_pwm_enable(struct i2c_client *client, int page,
u16 word)
{
@@ -216,15 +336,18 @@ static int max31785_pwm_enable(struct i2c_client *client, int page,
return -EINVAL;
}
- return pmbus_update_fan(client, page, 0, config, PB_FAN_1_RPM, rate);
+ return max31785_update_fan(client, page, config, PB_FAN_1_RPM, rate);
}
static int max31785_write_word_data(struct i2c_client *client, int page,
int reg, u16 word)
{
switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1:
+ return max31785_update_fan(client, page, PB_FAN_1_RPM,
+ PB_FAN_1_RPM, word);
case PMBUS_VIRT_PWM_1:
- return pmbus_update_fan(client, page, 0, 0, PB_FAN_1_RPM,
+ return max31785_update_fan(client, page, 0, PB_FAN_1_RPM,
max31785_scale_pwm(word));
case PMBUS_VIRT_PWM_ENABLE_1:
return max31785_pwm_enable(client, page, word);
@@ -232,7 +355,279 @@ static int max31785_write_word_data(struct i2c_client *client, int page,
break;
}
- return -ENODATA;
+ if (reg < PMBUS_VIRT_BASE)
+ return max31785_pmbus_write_word_data(client, page, reg, word);
+
+ return -ENXIO;
+}
+
+/*
+ * Returns negative error codes if an unrecoverable problem is detected, 0 if a
+ * recoverable problem is detected, or a positive value on success.
+ */
+static int max31785_of_fan_config(struct i2c_client *client,
+ struct pmbus_driver_info *info,
+ struct device_node *child)
+{
+ int mfr_cfg = 0, mfr_fault_resp = 0, pb_cfg;
+ struct device *dev = &client->dev;
+ const char *sval;
+ u32 page;
+ u32 uval;
+ int ret;
+
+ if (!of_device_is_compatible(child, "pmbus-fan"))
+ return 0;
+
+ ret = of_property_read_u32(child, "reg", &page);
+ if (ret < 0) {
+ dev_err(&client->dev, "Missing valid reg property\n");
+ return ret;
+ }
+
+ if (!(info->func[page] & PMBUS_HAVE_FAN12)) {
+ dev_err(dev, "Page %d does not have fan capabilities\n", page);
+ return -ENXIO;
+ }
+
+ ret = max31785_i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ pb_cfg = max31785_i2c_smbus_read_byte_data(client, PMBUS_FAN_CONFIG_12);
+ if (pb_cfg < 0)
+ return pb_cfg;
+
+ if (of_property_read_bool(child->parent, "use-stored-presence")) {
+ if (!(pb_cfg & PB_FAN_1_INSTALLED))
+ dev_info(dev, "Fan %d is configured but not installed\n",
+ page);
+ } else {
+ pb_cfg |= PB_FAN_1_INSTALLED;
+ }
+
+ ret = of_property_read_string(child, "maxim,fan-rotor-input", &sval);
+ if (ret < 0) {
+ dev_err(dev, "Missing valid maxim,fan-rotor-input property for fan %d\n",
+ page);
+ return ret;
+ }
+
+ if (strcmp("tach", sval) && strcmp("lock", sval)) {
+ dev_err(dev, "maxim,fan-rotor-input has invalid value for fan %d: %s\n",
+ page, sval);
+ return -EINVAL;
+ } else if (!strcmp("lock", sval)) {
+ mfr_cfg |= MFR_FAN_CONFIG_ROTOR;
+
+ ret = max31785_i2c_smbus_write_word_data(client,
+ MFR_FAN_FAULT_LIMIT,
+ 1);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_string(child, "maxim,fan-lock-polarity",
+ &sval);
+ if (ret < 0) {
+ dev_err(dev, "Missing valid maxim,fan-lock-polarity property for fan %d\n",
+ page);
+ return ret;
+ }
+
+ if (strcmp("low", sval) && strcmp("high", sval)) {
+ dev_err(dev, "maxim,fan-lock-polarity has invalid value for fan %d: %s\n",
+				page, sval);
+ return -EINVAL;
+ } else if (!strcmp("high", sval))
+ mfr_cfg |= MFR_FAN_CONFIG_ROTOR_HI_LO;
+ }
+
+ if (!of_property_read_string(child, "fan-mode", &sval)) {
+ if (!strcmp("rpm", sval))
+ pb_cfg |= PB_FAN_1_RPM;
+ else if (!strcmp("pwm", sval))
+ pb_cfg &= ~PB_FAN_1_RPM;
+ else {
+ dev_err(dev, "fan-mode has invalid value for fan %d: %s\n",
+ page, sval);
+ return -EINVAL;
+ }
+ }
+
+ ret = of_property_read_u32(child, "tach-pulses", &uval);
+ if (ret < 0) {
+ pb_cfg &= ~PB_FAN_1_PULSE_MASK;
+ } else if (uval && (uval - 1) < 4) {
+ pb_cfg = ((pb_cfg & ~PB_FAN_1_PULSE_MASK) | ((uval - 1) << 4));
+ } else {
+ dev_err(dev, "tach-pulses has invalid value for fan %d: %u\n",
+ page, uval);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(child, "maxim,fan-health"))
+ mfr_cfg |= MFR_FAN_CONFIG_HEALTH;
+
+ if (of_property_read_bool(child, "maxim,fan-no-watchdog") ||
+ of_property_read_bool(child, "maxim,tmp-no-fault-ramp"))
+ mfr_cfg |= MFR_FAN_CONFIG_TSFO;
+
+ if (of_property_read_bool(child, "maxim,fan-dual-tach"))
+ mfr_cfg |= MFR_FAN_CONFIG_DUAL_TACH;
+
+ if (of_property_read_bool(child, "maxim,fan-no-fault-ramp"))
+ mfr_cfg |= MFR_FAN_CONFIG_TACHO;
+
+ if (!of_property_read_u32(child, "maxim,fan-startup", &uval)) {
+ uval /= 2;
+ if (uval < 5) {
+ mfr_cfg |= uval;
+ } else {
+ dev_err(dev, "maxim,fan-startup has invalid value for fan %d: %u\n",
+ page, uval);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(child, "maxim,fan-ramp", &uval)) {
+ if (uval < 8) {
+ mfr_cfg |= uval << 5;
+ } else {
+ dev_err(dev, "maxim,fan-ramp has invalid value for fan %d: %u\n",
+ page, uval);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(child, "maxim,tmp-hysteresis", &uval)) {
+ uval /= 2;
+ uval -= 1;
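+		/* hysteresis values of 2, 4, 6 and 8 degrees map to 0-3 */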
+ if (uval < 4) {
+ mfr_cfg |= uval << 10;
+ } else {
+ dev_err(dev, "maxim,tmp-hysteresis has invalid value for fan %d, %u\n",
+ page, uval);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(child, "maxim,fan-pwm-freq", &uval)) {
+ u16 val;
+
+ if (uval == 30) {
+ val = 0;
+ } else if (uval == 50) {
+ val = 1;
+ } else if (uval == 100) {
+ val = 2;
+ } else if (uval == 150) {
+ val = 3;
+ } else if (uval == 25000) {
+ val = 7;
+ } else {
+ dev_err(dev, "maxim,fan-pwm-freq has invalid value for fan %d: %u\n",
+ page, uval);
+ return -EINVAL;
+ }
+
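+		/* e.g. 25000 Hz maps to field value 7 in MFR_FAN_CONFIG[15:13] */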
+ mfr_cfg |= val << 13;
+ }
+
+ if (of_property_read_bool(child, "maxim,fan-fault-pin-mon"))
+ mfr_fault_resp |= MFR_FAULT_RESPONSE_MONITOR;
+
+ ret = max31785_i2c_smbus_write_byte_data(client, PMBUS_FAN_CONFIG_12,
+ pb_cfg & ~PB_FAN_1_INSTALLED);
+ if (ret < 0)
+ return ret;
+
+ ret = max31785_i2c_smbus_write_word_data(client, MFR_FAN_CONFIG,
+ mfr_cfg);
+ if (ret < 0)
+ return ret;
+
+ ret = max31785_i2c_smbus_write_byte_data(client, MFR_FAULT_RESPONSE,
+ mfr_fault_resp);
+ if (ret < 0)
+ return ret;
+
+ ret = max31785_i2c_smbus_write_byte_data(client, PMBUS_FAN_CONFIG_12,
+ pb_cfg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Fans are on pages 0 - 5. If the page property of a fan node is
+	 * greater than 5 we will have errored out in the checks above.
+ * Therefore we don't need to cope with values up to 31, and the int
+ * return type is enough.
+ *
+ * The bit mask return value is used to populate a bitfield of fans
+ * who are both configured in the devicetree _and_ reported as
+ * installed by the hardware. Any fans that are not configured in the
+ * devicetree but are reported as installed by the hardware will have
+ * their hardware configuration updated to unset the installed bit.
+ */
+ return BIT(page);
+}
+
+static int max31785_of_tmp_config(struct i2c_client *client,
+ struct pmbus_driver_info *info,
+ struct device_node *child)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np;
+ u16 mfr_tmp_cfg = 0;
+ u32 page;
+ u32 uval;
+ int ret;
+ int i;
+
+ if (!of_device_is_compatible(child, "pmbus-temperature"))
+ return 0;
+
+ ret = of_property_read_u32(child, "reg", &page);
+ if (ret < 0) {
+ dev_err(&client->dev, "Missing valid reg property\n");
+ return ret;
+ }
+
+ if (!(info->func[page] & PMBUS_HAVE_TEMP)) {
+ dev_err(dev, "Page %d does not have temp capabilities\n", page);
+ return -ENXIO;
+ }
+
+ ret = max31785_i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ if (!of_property_read_u32(child, "maxim,tmp-offset", &uval)) {
+ if (uval < 32)
+ mfr_tmp_cfg |= uval << 10;
+ }
+
+ i = 0;
+ while ((np = of_parse_phandle(child, "maxim,tmp-fans", i))) {
+ if (of_property_read_u32(np, "reg", &uval)) {
+ dev_err(&client->dev, "Failed to read fan reg property for phandle index %d\n",
+ i);
+ } else {
+ if (uval < 6)
+ mfr_tmp_cfg |= BIT(uval);
+ else
+ dev_warn(&client->dev, "Invalid fan page: %d\n",
+ uval);
+ }
+ i++;
+ }
+
+ ret = max31785_i2c_smbus_write_word_data(client, MFR_TEMP_SENSOR_CONFIG,
+ mfr_tmp_cfg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
#define MAX31785_FAN_FUNCS \
@@ -304,11 +699,11 @@ static int max31785_configure_dual_tach(struct i2c_client *client,
int i;
for (i = 0; i < MAX31785_NR_FAN_PAGES; i++) {
- ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ ret = max31785_i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
if (ret < 0)
return ret;
- ret = i2c_smbus_read_word_data(client, MFR_FAN_CONFIG);
+ ret = max31785_i2c_smbus_read_word_data(client, MFR_FAN_CONFIG);
if (ret < 0)
return ret;
@@ -328,9 +723,12 @@ static int max31785_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ struct device_node *child;
struct pmbus_driver_info *info;
bool dual_tach = false;
+ u32 fans;
s64 ret;
+ int i;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA |
@@ -343,7 +741,7 @@ static int max31785_probe(struct i2c_client *client,
*info = max31785_info;
- ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 255);
+ ret = max31785_i2c_smbus_write_byte_data(client, PMBUS_PAGE, 255);
if (ret < 0)
return ret;
@@ -360,6 +758,49 @@ static int max31785_probe(struct i2c_client *client,
return -ENODEV;
}
+ fans = 0;
+ for_each_child_of_node(dev->of_node, child) {
+ ret = max31785_of_fan_config(client, info, child);
+ if (ret < 0) {
+ of_node_put(child);
+ return ret;
+ }
+
+ if (ret)
+ fans |= ret;
+
+ ret = max31785_of_tmp_config(client, info, child);
+ if (ret < 0) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < MAX31785_NR_PAGES; i++) {
+ bool have_fan = !!(info->func[i] & PMBUS_HAVE_FAN12);
+ bool fan_configured = !!(fans & BIT(i));
+
+ if (!have_fan || fan_configured)
+ continue;
+
+ ret = max31785_i2c_smbus_write_byte_data(client, PMBUS_PAGE,
+ i);
+ if (ret < 0)
+ return ret;
+
+ ret = max31785_i2c_smbus_read_byte_data(client,
+ PMBUS_FAN_CONFIG_12);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~PB_FAN_1_INSTALLED;
+ ret = max31785_i2c_smbus_write_word_data(client,
+ PMBUS_FAN_CONFIG_12,
+ ret);
+ if (ret < 0)
+ return ret;
+ }
+
if (dual_tach) {
ret = max31785_configure_dual_tach(client, info);
if (ret < 0)
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index e721a016f3e7..2ee7c13666e6 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -158,9 +158,19 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL) &&
data->info->pages > 1 && page != data->currpage) {
+ dev_dbg(&client->dev, "Want page %u, %u cached\n", page,
+ data->currpage);
+
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- if (rv < 0)
- return rv;
+ if (rv < 0) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE,
+ page);
+ dev_dbg(&client->dev,
+ "Failed to set page %u, performed one-shot retry %s: %d\n",
+ page, rv ? "and failed" : "with success", rv);
+ if (rv < 0)
+ return rv;
+ }
rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
if (rv < 0)
@@ -451,15 +461,15 @@ static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
return s->data;
}
- config = pmbus_read_byte_data(client, page,
- pmbus_fan_config_registers[id]);
+ config = _pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
if (config < 0)
return config;
have_rpm = !!(config & pmbus_fan_rpm_mask[id]);
if (want_rpm == have_rpm)
- return pmbus_read_word_data(client, page, 0xff,
- pmbus_fan_command_registers[id]);
+ return _pmbus_read_word_data(client, page, 0xff,
+ pmbus_fan_command_registers[id]);
/* Can't sensibly map between RPM and PWM, just return zero */
return 0;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 2ddca08f8a76..a2cfc555c284 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -791,6 +791,15 @@ config I2C_NOMADIK
I2C interface from ST-Ericsson's Nomadik and Ux500 architectures,
as well as the STA2X11 PCIe I/O HUB.
+config I2C_NPCM7XX
+ tristate "Nuvoton I2C Controller"
+ depends on ARCH_NPCM7XX || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ Nuvoton I2C controller, which is available on the NPCM7xx BMC
+ controller.
+	  The driver also supports slave mode (select I2C_SLAVE).
+
config I2C_OCORES
tristate "OpenCores I2C Controller"
help
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 25d60889713c..8f3dfd376bec 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_I2C_MT7621) += i2c-mt7621.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
+obj-$(CONFIG_I2C_NPCM7XX) += i2c-npcm7xx.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
obj-$(CONFIG_I2C_OWL) += i2c-owl.o
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
index 977d6f524649..95b6b6bc1d78 100644
--- a/drivers/i2c/busses/i2c-fsi.c
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -703,7 +703,12 @@ static int fsi_i2c_probe(struct device *dev)
for (port_no = 0; port_no < ports; port_no++) {
np = fsi_i2c_find_port_of_node(dev->of_node, port_no);
- if (np && !of_device_is_available(np))
+ /* Do not add port if it is not described in the device tree */
+ if (!np)
+ continue;
+
+ /* Do not add port if it is described as disabled */
+ if (!of_device_is_available(np))
continue;
port = kzalloc(sizeof(*port), GFP_KERNEL);
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
new file mode 100644
index 000000000000..a8e75c3484f1
--- /dev/null
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -0,0 +1,2342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM7xx I2C Controller driver
+ *
+ * Copyright (C) 2020 Nuvoton Technologies tali.perry@nuvoton.com
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+enum i2c_mode {
+ I2C_MASTER,
+ I2C_SLAVE,
+};
+
+/*
+ * External I2C interface driver xfer indication values, which indicate the
+ * status of the bus.
+ */
+enum i2c_state_ind {
+ I2C_NO_STATUS_IND = 0,
+ I2C_SLAVE_RCV_IND,
+ I2C_SLAVE_XMIT_IND,
+ I2C_SLAVE_XMIT_MISSING_DATA_IND,
+ I2C_SLAVE_RESTART_IND,
+ I2C_SLAVE_DONE_IND,
+ I2C_MASTER_DONE_IND,
+ I2C_NACK_IND,
+ I2C_BUS_ERR_IND,
+ I2C_WAKE_UP_IND,
+ I2C_BLOCK_BYTES_ERR_IND,
+ I2C_SLAVE_RCV_MISSING_DATA_IND,
+};
+
+/*
+ * Operation type values (used to define the operation currently running).
+ * The module is interrupt driven; on each interrupt the current operation
+ * is checked to see whether the module is reading or writing.
+ */
+enum i2c_oper {
+ I2C_NO_OPER = 0,
+ I2C_WRITE_OPER,
+ I2C_READ_OPER,
+};
+
+/* I2C Bank (module has 2 banks of registers) */
+enum i2c_bank {
+ I2C_BANK_0 = 0,
+ I2C_BANK_1,
+};
+
+/* Internal I2C state values (for the I2C module state machine). */
+enum i2c_state {
+ I2C_DISABLE = 0,
+ I2C_IDLE,
+ I2C_MASTER_START,
+ I2C_SLAVE_MATCH,
+ I2C_OPER_STARTED,
+ I2C_STOP_PENDING,
+};
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+/* Module supports setting multiple own slave addresses */
+enum i2c_addr {
+ I2C_SLAVE_ADDR1 = 0,
+ I2C_SLAVE_ADDR2,
+ I2C_SLAVE_ADDR3,
+ I2C_SLAVE_ADDR4,
+ I2C_SLAVE_ADDR5,
+ I2C_SLAVE_ADDR6,
+ I2C_SLAVE_ADDR7,
+ I2C_SLAVE_ADDR8,
+ I2C_SLAVE_ADDR9,
+ I2C_SLAVE_ADDR10,
+ I2C_GC_ADDR,
+ I2C_ARP_ADDR,
+};
+#endif
+
+/* init register and default value required to enable module */
+#define NPCM_I2CSEGCTL 0xE4
+#define NPCM_I2CSEGCTL_INIT_VAL 0x0333F000
+
+/* Common regs */
+#define NPCM_I2CSDA 0x00
+#define NPCM_I2CST 0x02
+#define NPCM_I2CCST 0x04
+#define NPCM_I2CCTL1 0x06
+#define NPCM_I2CADDR1 0x08
+#define NPCM_I2CCTL2 0x0A
+#define NPCM_I2CADDR2 0x0C
+#define NPCM_I2CCTL3 0x0E
+#define NPCM_I2CCST2 0x18
+#define NPCM_I2CCST3 0x19
+#define I2C_VER 0x1F
+
+/* BANK 0 regs */
+#define NPCM_I2CADDR3 0x10
+#define NPCM_I2CADDR7 0x11
+#define NPCM_I2CADDR4 0x12
+#define NPCM_I2CADDR8 0x13
+#define NPCM_I2CADDR5 0x14
+#define NPCM_I2CADDR9 0x15
+#define NPCM_I2CADDR6 0x16
+#define NPCM_I2CADDR10 0x17
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+/*
+ * npcm_i2caddr array:
+ * The module supports having multiple own slave addresses.
+ * Since the addr regs are sprinkled all over the address space,
+ * use this array to get the address of each register.
+ */
+#define I2C_NUM_OWN_ADDR 10
+static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
+ NPCM_I2CADDR1, NPCM_I2CADDR2, NPCM_I2CADDR3, NPCM_I2CADDR4,
+ NPCM_I2CADDR5, NPCM_I2CADDR6, NPCM_I2CADDR7, NPCM_I2CADDR8,
+ NPCM_I2CADDR9, NPCM_I2CADDR10,
+};
+#endif
+
+#define NPCM_I2CCTL4 0x1A
+#define NPCM_I2CCTL5 0x1B
+#define NPCM_I2CSCLLT 0x1C /* SCL Low Time */
+#define NPCM_I2CFIF_CTL 0x1D /* FIFO Control */
+#define NPCM_I2CSCLHT 0x1E /* SCL High Time */
+
+/* BANK 1 regs */
+#define NPCM_I2CFIF_CTS 0x10 /* Both FIFOs Control and Status */
+#define NPCM_I2CTXF_CTL 0x12 /* Tx-FIFO Control */
+#define NPCM_I2CT_OUT 0x14 /* Bus Timeout */
+#define NPCM_I2CPEC 0x16 /* PEC Data */
+#define NPCM_I2CTXF_STS 0x1A /* Tx-FIFO Status */
+#define NPCM_I2CRXF_STS 0x1C /* Rx-FIFO Status */
+#define NPCM_I2CRXF_CTL 0x1E /* Rx-FIFO Control */
+
+/* NPCM_I2CST reg fields */
+#define NPCM_I2CST_XMIT BIT(0)
+#define NPCM_I2CST_MASTER BIT(1)
+#define NPCM_I2CST_NMATCH BIT(2)
+#define NPCM_I2CST_STASTR BIT(3)
+#define NPCM_I2CST_NEGACK BIT(4)
+#define NPCM_I2CST_BER BIT(5)
+#define NPCM_I2CST_SDAST BIT(6)
+#define NPCM_I2CST_SLVSTP BIT(7)
+
+/* NPCM_I2CCST reg fields */
+#define NPCM_I2CCST_BUSY BIT(0)
+#define NPCM_I2CCST_BB BIT(1)
+#define NPCM_I2CCST_MATCH BIT(2)
+#define NPCM_I2CCST_GCMATCH BIT(3)
+#define NPCM_I2CCST_TSDA BIT(4)
+#define NPCM_I2CCST_TGSCL BIT(5)
+#define NPCM_I2CCST_MATCHAF BIT(6)
+#define NPCM_I2CCST_ARPMATCH BIT(7)
+
+/* NPCM_I2CCTL1 reg fields */
+#define NPCM_I2CCTL1_START BIT(0)
+#define NPCM_I2CCTL1_STOP BIT(1)
+#define NPCM_I2CCTL1_INTEN BIT(2)
+#define NPCM_I2CCTL1_EOBINTE BIT(3)
+#define NPCM_I2CCTL1_ACK BIT(4)
+#define NPCM_I2CCTL1_GCMEN BIT(5)
+#define NPCM_I2CCTL1_NMINTE BIT(6)
+#define NPCM_I2CCTL1_STASTRE BIT(7)
+
+/* RW1S fields (inside a RW reg): */
+#define NPCM_I2CCTL1_RWS \
+ (NPCM_I2CCTL1_START | NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_ACK)
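+/*
+ * Writing 1 to these bits sets them and hardware clears them, so they are
+ * masked off before every read-modify-write of I2CCTL1 to avoid
+ * unintentionally re-issuing a START, STOP or ACK.
+ */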
+
+/* npcm_i2caddr reg fields */
+#define NPCM_I2CADDR_A GENMASK(6, 0)
+#define NPCM_I2CADDR_SAEN BIT(7)
+
+/* NPCM_I2CCTL2 reg fields */
+#define I2CCTL2_ENABLE BIT(0)
+#define I2CCTL2_SCLFRQ6_0 GENMASK(7, 1)
+
+/* NPCM_I2CCTL3 reg fields */
+#define I2CCTL3_SCLFRQ8_7 GENMASK(1, 0)
+#define I2CCTL3_ARPMEN BIT(2)
+#define I2CCTL3_IDL_START BIT(3)
+#define I2CCTL3_400K_MODE BIT(4)
+#define I2CCTL3_BNK_SEL BIT(5)
+#define I2CCTL3_SDA_LVL BIT(6)
+#define I2CCTL3_SCL_LVL BIT(7)
+
+/* NPCM_I2CCST2 reg fields */
+#define NPCM_I2CCST2_MATCHA1F BIT(0)
+#define NPCM_I2CCST2_MATCHA2F BIT(1)
+#define NPCM_I2CCST2_MATCHA3F BIT(2)
+#define NPCM_I2CCST2_MATCHA4F BIT(3)
+#define NPCM_I2CCST2_MATCHA5F BIT(4)
+#define NPCM_I2CCST2_MATCHA6F BIT(5)
+#define NPCM_I2CCST2_MATCHA7F BIT(6)
+#define NPCM_I2CCST2_INTSTS BIT(7)
+
+/* NPCM_I2CCST3 reg fields */
+#define NPCM_I2CCST3_MATCHA8F BIT(0)
+#define NPCM_I2CCST3_MATCHA9F BIT(1)
+#define NPCM_I2CCST3_MATCHA10F BIT(2)
+#define NPCM_I2CCST3_EO_BUSY BIT(7)
+
+/* NPCM_I2CCTL4 reg fields */
+#define I2CCTL4_HLDT GENMASK(5, 0)
+#define I2CCTL4_LVL_WE BIT(7)
+
+/* NPCM_I2CCTL5 reg fields */
+#define I2CCTL5_DBNCT GENMASK(3, 0)
+
+/* NPCM_I2CFIF_CTS reg fields */
+#define NPCM_I2CFIF_CTS_RXF_TXE BIT(1)
+#define NPCM_I2CFIF_CTS_RFTE_IE BIT(3)
+#define NPCM_I2CFIF_CTS_CLR_FIFO BIT(6)
+#define NPCM_I2CFIF_CTS_SLVRSTR BIT(7)
+
+/* NPCM_I2CTXF_CTL reg fields */
+#define NPCM_I2CTXF_CTL_TX_THR GENMASK(4, 0)
+#define NPCM_I2CTXF_CTL_THR_TXIE BIT(6)
+
+/* NPCM_I2CT_OUT reg fields */
+#define NPCM_I2CT_OUT_TO_CKDIV GENMASK(5, 0)
+#define NPCM_I2CT_OUT_T_OUTIE BIT(6)
+#define NPCM_I2CT_OUT_T_OUTST BIT(7)
+
+/* NPCM_I2CTXF_STS reg fields */
+#define NPCM_I2CTXF_STS_TX_BYTES GENMASK(4, 0)
+#define NPCM_I2CTXF_STS_TX_THST BIT(6)
+
+/* NPCM_I2CRXF_STS reg fields */
+#define NPCM_I2CRXF_STS_RX_BYTES GENMASK(4, 0)
+#define NPCM_I2CRXF_STS_RX_THST BIT(6)
+
+/* NPCM_I2CFIF_CTL reg fields */
+#define NPCM_I2CFIF_CTL_FIFO_EN BIT(4)
+
+/* NPCM_I2CRXF_CTL reg fields */
+#define NPCM_I2CRXF_CTL_RX_THR GENMASK(4, 0)
+#define NPCM_I2CRXF_CTL_LAST_PEC BIT(5)
+#define NPCM_I2CRXF_CTL_THR_RXIE BIT(6)
+
+#define I2C_HW_FIFO_SIZE 16
+
+/* I2C_VER reg fields */
+#define I2C_VER_VERSION GENMASK(6, 0)
+#define I2C_VER_FIFO_EN BIT(7)
+
+/* stall/stuck timeout in us */
+#define DEFAULT_STALL_COUNT 25
+
+/* SCLFRQ field position */
+#define SCLFRQ_0_TO_6 GENMASK(6, 0)
+#define SCLFRQ_7_TO_8 GENMASK(8, 7)
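+/* the 9-bit SCLFRQ value is split across I2CCTL2[7:1] and I2CCTL3[1:0] */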
+
+/* Supported clock settings, in Hz */
+#define I2C_FREQ_MIN_HZ 10000
+#define I2C_FREQ_MAX_HZ I2C_MAX_FAST_MODE_PLUS_FREQ
+
+/* Status of one I2C module */
+struct npcm_i2c {
+ struct i2c_adapter adap;
+ struct device *dev;
+ unsigned char __iomem *reg;
+ spinlock_t lock; /* IRQ synchronization */
+ struct completion cmd_complete;
+ int cmd_err;
+ struct i2c_msg *msgs;
+ int msgs_num;
+ int num;
+ u32 apb_clk;
+ struct i2c_bus_recovery_info rinfo;
+ enum i2c_state state;
+ enum i2c_oper operation;
+ enum i2c_mode master_or_slave;
+ enum i2c_state_ind stop_ind;
+ u8 dest_addr;
+ u8 *rd_buf;
+ u16 rd_size;
+ u16 rd_ind;
+ u8 *wr_buf;
+ u16 wr_size;
+ u16 wr_ind;
+ bool fifo_use;
+ u16 PEC_mask; /* PEC bit mask per slave address */
+ bool PEC_use;
+ bool read_block_use;
+ unsigned long int_time_stamp;
+ unsigned long bus_freq; /* in Hz */
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ u8 own_slave_addr;
+ struct i2c_client *slave;
+ int slv_rd_size;
+ int slv_rd_ind;
+ int slv_wr_size;
+ int slv_wr_ind;
+ u8 slv_rd_buf[I2C_HW_FIFO_SIZE];
+ u8 slv_wr_buf[I2C_HW_FIFO_SIZE];
+#endif
+ struct dentry *debugfs; /* debugfs device directory */
+ u64 ber_cnt;
+ u64 rec_succ_cnt;
+ u64 rec_fail_cnt;
+ u64 nack_cnt;
+ u64 timeout_cnt;
+};
+
+static inline void npcm_i2c_select_bank(struct npcm_i2c *bus,
+ enum i2c_bank bank)
+{
+ u8 i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3);
+
+ if (bank == I2C_BANK_0)
+ i2cctl3 = i2cctl3 & ~I2CCTL3_BNK_SEL;
+ else
+ i2cctl3 = i2cctl3 | I2CCTL3_BNK_SEL;
+ iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3);
+}
+
+static void npcm_i2c_init_params(struct npcm_i2c *bus)
+{
+ bus->stop_ind = I2C_NO_STATUS_IND;
+ bus->rd_size = 0;
+ bus->wr_size = 0;
+ bus->rd_ind = 0;
+ bus->wr_ind = 0;
+ bus->read_block_use = false;
+ bus->int_time_stamp = 0;
+ bus->PEC_use = false;
+ bus->PEC_mask = 0;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ if (bus->slave)
+ bus->master_or_slave = I2C_SLAVE;
+#endif
+}
+
+static inline void npcm_i2c_wr_byte(struct npcm_i2c *bus, u8 data)
+{
+ iowrite8(data, bus->reg + NPCM_I2CSDA);
+}
+
+static inline u8 npcm_i2c_rd_byte(struct npcm_i2c *bus)
+{
+ return ioread8(bus->reg + NPCM_I2CSDA);
+}
+
+static int npcm_i2c_get_SCL(struct i2c_adapter *_adap)
+{
+ struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
+
+ return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+}
+
+static int npcm_i2c_get_SDA(struct i2c_adapter *_adap)
+{
+ struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
+
+ return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3));
+}
+
+static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus)
+{
+ if (bus->operation == I2C_READ_OPER)
+ return bus->rd_ind;
+ if (bus->operation == I2C_WRITE_OPER)
+ return bus->wr_ind;
+ return 0;
+}
+
+/* SMBus quick protocol: an address-only transfer with no data bytes */
+static inline bool npcm_i2c_is_quick(struct npcm_i2c *bus)
+{
+ return bus->wr_size == 0 && bus->rd_size == 0;
+}
+
+static void npcm_i2c_disable(struct npcm_i2c *bus)
+{
+ u8 i2cctl2;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ int i;
+
+ /* select bank 0 for I2C addresses */
+ npcm_i2c_select_bank(bus, I2C_BANK_0);
+
+ /* Slave addresses removal */
+ for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++)
+ iowrite8(0, bus->reg + npcm_i2caddr[i]);
+
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+#endif
+ /* Disable module */
+ i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2);
+ i2cctl2 = i2cctl2 & ~I2CCTL2_ENABLE;
+ iowrite8(i2cctl2, bus->reg + NPCM_I2CCTL2);
+
+ bus->state = I2C_DISABLE;
+}
+
+static void npcm_i2c_enable(struct npcm_i2c *bus)
+{
+ u8 i2cctl2 = ioread8(bus->reg + NPCM_I2CCTL2);
+
+ i2cctl2 = i2cctl2 | I2CCTL2_ENABLE;
+ iowrite8(i2cctl2, bus->reg + NPCM_I2CCTL2);
+ bus->state = I2C_IDLE;
+}
+
+/* enable/disable end of busy (EOB) interrupts */
+static inline void npcm_i2c_eob_int(struct npcm_i2c *bus, bool enable)
+{
+ u8 val;
+
+ /* Clear EO_BUSY pending bit: */
+ val = ioread8(bus->reg + NPCM_I2CCST3);
+ val = val | NPCM_I2CCST3_EO_BUSY;
+ iowrite8(val, bus->reg + NPCM_I2CCST3);
+
+ val = ioread8(bus->reg + NPCM_I2CCTL1);
+ val &= ~NPCM_I2CCTL1_RWS;
+ if (enable)
+ val |= NPCM_I2CCTL1_EOBINTE;
+ else
+ val &= ~NPCM_I2CCTL1_EOBINTE;
+ iowrite8(val, bus->reg + NPCM_I2CCTL1);
+}
+
+static inline bool npcm_i2c_tx_fifo_empty(struct npcm_i2c *bus)
+{
+ u8 tx_fifo_sts;
+
+ tx_fifo_sts = ioread8(bus->reg + NPCM_I2CTXF_STS);
+ /* check if TX FIFO is not empty */
+ if ((tx_fifo_sts & NPCM_I2CTXF_STS_TX_BYTES) == 0)
+ return false;
+
+ /* check if TX FIFO status bit is set: */
+ return !!FIELD_GET(NPCM_I2CTXF_STS_TX_THST, tx_fifo_sts);
+}
+
+static inline bool npcm_i2c_rx_fifo_full(struct npcm_i2c *bus)
+{
+ u8 rx_fifo_sts;
+
+ rx_fifo_sts = ioread8(bus->reg + NPCM_I2CRXF_STS);
+ /* check if RX FIFO is not empty: */
+ if ((rx_fifo_sts & NPCM_I2CRXF_STS_RX_BYTES) == 0)
+ return false;
+
+ /* check if rx fifo full status is set: */
+ return !!FIELD_GET(NPCM_I2CRXF_STS_RX_THST, rx_fifo_sts);
+}
+
+static inline void npcm_i2c_clear_fifo_int(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ val = ioread8(bus->reg + NPCM_I2CFIF_CTS);
+ val = (val & NPCM_I2CFIF_CTS_SLVRSTR) | NPCM_I2CFIF_CTS_RXF_TXE;
+ iowrite8(val, bus->reg + NPCM_I2CFIF_CTS);
+}
+
+static inline void npcm_i2c_clear_tx_fifo(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ val = ioread8(bus->reg + NPCM_I2CTXF_STS);
+ val = val | NPCM_I2CTXF_STS_TX_THST;
+ iowrite8(val, bus->reg + NPCM_I2CTXF_STS);
+}
+
+static inline void npcm_i2c_clear_rx_fifo(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ val = ioread8(bus->reg + NPCM_I2CRXF_STS);
+ val = val | NPCM_I2CRXF_STS_RX_THST;
+ iowrite8(val, bus->reg + NPCM_I2CRXF_STS);
+}
+
+static void npcm_i2c_int_enable(struct npcm_i2c *bus, bool enable)
+{
+ u8 val;
+
+ val = ioread8(bus->reg + NPCM_I2CCTL1);
+ val &= ~NPCM_I2CCTL1_RWS;
+ if (enable)
+ val |= NPCM_I2CCTL1_INTEN;
+ else
+ val &= ~NPCM_I2CCTL1_INTEN;
+ iowrite8(val, bus->reg + NPCM_I2CCTL1);
+}
+
+static inline void npcm_i2c_master_start(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ val = ioread8(bus->reg + NPCM_I2CCTL1);
+ val &= ~(NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_ACK);
+ val |= NPCM_I2CCTL1_START;
+ iowrite8(val, bus->reg + NPCM_I2CCTL1);
+}
+
+static inline void npcm_i2c_master_stop(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ /*
+	 * Override HW issue: the controller may fail to generate a stop
+	 * condition in a master write operation. Delay at least 5 us from
+	 * the last interrupt before issuing a stop.
+ */
+ udelay(10); /* function called from interrupt, can't sleep */
+ val = ioread8(bus->reg + NPCM_I2CCTL1);
+ val &= ~(NPCM_I2CCTL1_START | NPCM_I2CCTL1_ACK);
+ val |= NPCM_I2CCTL1_STOP;
+ iowrite8(val, bus->reg + NPCM_I2CCTL1);
+
+ if (!bus->fifo_use)
+ return;
+
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+
+ if (bus->operation == I2C_READ_OPER)
+ npcm_i2c_clear_rx_fifo(bus);
+ else
+ npcm_i2c_clear_tx_fifo(bus);
+ npcm_i2c_clear_fifo_int(bus);
+ iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
+}
+
+static inline void npcm_i2c_stall_after_start(struct npcm_i2c *bus, bool stall)
+{
+ u8 val;
+
+ val = ioread8(bus->reg + NPCM_I2CCTL1);
+ val &= ~NPCM_I2CCTL1_RWS;
+ if (stall)
+ val |= NPCM_I2CCTL1_STASTRE;
+ else
+ val &= ~NPCM_I2CCTL1_STASTRE;
+ iowrite8(val, bus->reg + NPCM_I2CCTL1);
+}
+
+static inline void npcm_i2c_nack(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ val = ioread8(bus->reg + NPCM_I2CCTL1);
+ val &= ~(NPCM_I2CCTL1_STOP | NPCM_I2CCTL1_START);
+ val |= NPCM_I2CCTL1_ACK;
+ iowrite8(val, bus->reg + NPCM_I2CCTL1);
+}
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable)
+{
+ u8 i2cctl1;
+
+ /* enable interrupt on slave match: */
+ i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1);
+ i2cctl1 &= ~NPCM_I2CCTL1_RWS;
+ if (enable)
+ i2cctl1 |= NPCM_I2CCTL1_NMINTE;
+ else
+ i2cctl1 &= ~NPCM_I2CCTL1_NMINTE;
+ iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1);
+}
+
+static int npcm_i2c_slave_enable(struct npcm_i2c *bus, enum i2c_addr addr_type,
+ u8 addr, bool enable)
+{
+ u8 i2cctl1;
+ u8 i2cctl3;
+ u8 sa_reg;
+
+ sa_reg = (addr & 0x7F) | FIELD_PREP(NPCM_I2CADDR_SAEN, enable);
+ if (addr_type == I2C_GC_ADDR) {
+ i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1);
+ if (enable)
+ i2cctl1 |= NPCM_I2CCTL1_GCMEN;
+ else
+ i2cctl1 &= ~NPCM_I2CCTL1_GCMEN;
+ iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1);
+ return 0;
+ }
+ if (addr_type == I2C_ARP_ADDR) {
+ i2cctl3 = ioread8(bus->reg + NPCM_I2CCTL3);
+ if (enable)
+ i2cctl3 |= I2CCTL3_ARPMEN;
+ else
+ i2cctl3 &= ~I2CCTL3_ARPMEN;
+ iowrite8(i2cctl3, bus->reg + NPCM_I2CCTL3);
+ return 0;
+ }
+ if (addr_type >= I2C_ARP_ADDR)
+ return -EFAULT;
+ /* select bank 0 for address 3 to 10 */
+ if (addr_type > I2C_SLAVE_ADDR2)
+ npcm_i2c_select_bank(bus, I2C_BANK_0);
+ /* Set and enable the address */
+ iowrite8(sa_reg, bus->reg + npcm_i2caddr[addr_type]);
+ npcm_i2c_slave_int_enable(bus, enable);
+ if (addr_type > I2C_SLAVE_ADDR2)
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+ return 0;
+}
+#endif
+
+static void npcm_i2c_reset(struct npcm_i2c *bus)
+{
+ /*
+	 * Save the relevant I2CCTL1 bits; they are cleared when the
+	 * module is disabled.
+ */
+ u8 i2cctl1;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ u8 addr;
+#endif
+
+ i2cctl1 = ioread8(bus->reg + NPCM_I2CCTL1);
+
+ npcm_i2c_disable(bus);
+ npcm_i2c_enable(bus);
+
+ /* Restore NPCM_I2CCTL1 Status */
+ i2cctl1 &= ~NPCM_I2CCTL1_RWS;
+ iowrite8(i2cctl1, bus->reg + NPCM_I2CCTL1);
+
+ /* Clear BB (BUS BUSY) bit */
+ iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
+ iowrite8(0xFF, bus->reg + NPCM_I2CST);
+
+ /* Clear EOB bit */
+ iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3);
+
+ /* Clear all fifo bits: */
+ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS);
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ if (bus->slave) {
+ addr = bus->slave->addr;
+ npcm_i2c_slave_enable(bus, I2C_SLAVE_ADDR1, addr, true);
+ }
+#endif
+
+ bus->state = I2C_IDLE;
+}
+
+static inline bool npcm_i2c_is_master(struct npcm_i2c *bus)
+{
+ return !!FIELD_GET(NPCM_I2CST_MASTER, ioread8(bus->reg + NPCM_I2CST));
+}
+
+static void npcm_i2c_callback(struct npcm_i2c *bus,
+ enum i2c_state_ind op_status, u16 info)
+{
+ struct i2c_msg *msgs;
+ int msgs_num;
+
+ msgs = bus->msgs;
+ msgs_num = bus->msgs_num;
+ /*
+	 * Check that the transaction has not timed out and that msgs
+	 * still holds a valid value.
+ */
+ if (!msgs)
+ return;
+
+ if (completion_done(&bus->cmd_complete))
+ return;
+
+ switch (op_status) {
+ case I2C_MASTER_DONE_IND:
+ bus->cmd_err = bus->msgs_num;
+ fallthrough;
+ case I2C_BLOCK_BYTES_ERR_IND:
+ /* Master tx finished and all transmit bytes were sent */
+ if (bus->msgs) {
+ if (msgs[0].flags & I2C_M_RD)
+ msgs[0].len = info;
+ else if (msgs_num == 2 &&
+ msgs[1].flags & I2C_M_RD)
+ msgs[1].len = info;
+ }
+		if (!completion_done(&bus->cmd_complete))
+ complete(&bus->cmd_complete);
+ break;
+
+ case I2C_NACK_IND:
+ /* MASTER transmit got a NACK before tx all bytes */
+ bus->cmd_err = -ENXIO;
+ if (bus->master_or_slave == I2C_MASTER)
+ complete(&bus->cmd_complete);
+
+ break;
+ case I2C_BUS_ERR_IND:
+ /* Bus error */
+ bus->cmd_err = -EAGAIN;
+ if (bus->master_or_slave == I2C_MASTER)
+ complete(&bus->cmd_complete);
+
+ break;
+ case I2C_WAKE_UP_IND:
+ /* I2C wake up */
+ break;
+ default:
+ break;
+ }
+
+ bus->operation = I2C_NO_OPER;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ if (bus->slave)
+ bus->master_or_slave = I2C_SLAVE;
+#endif
+}
+
+static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)
+{
+ if (bus->operation == I2C_WRITE_OPER)
+ return FIELD_GET(NPCM_I2CTXF_STS_TX_BYTES,
+ ioread8(bus->reg + NPCM_I2CTXF_STS));
+ if (bus->operation == I2C_READ_OPER)
+ return FIELD_GET(NPCM_I2CRXF_STS_RX_BYTES,
+ ioread8(bus->reg + NPCM_I2CRXF_STS));
+ return 0;
+}
+
+static void npcm_i2c_write_to_fifo_master(struct npcm_i2c *bus, u16 max_bytes)
+{
+ u8 size_free_fifo;
+
+ /*
+	 * Fill the FIFO while it is not full and there are more bytes to
+	 * write.
+ */
+ size_free_fifo = I2C_HW_FIFO_SIZE - npcm_i2c_fifo_usage(bus);
+ while (max_bytes-- && size_free_fifo) {
+ if (bus->wr_ind < bus->wr_size)
+ npcm_i2c_wr_byte(bus, bus->wr_buf[bus->wr_ind++]);
+ else
+ npcm_i2c_wr_byte(bus, 0xFF);
+ size_free_fifo = I2C_HW_FIFO_SIZE - npcm_i2c_fifo_usage(bus);
+ }
+}
+
+/*
+ * npcm_i2c_set_fifo:
+ * Configure the FIFO before using it. If nread is -1 the RX FIFO is left
+ * unconfigured; likewise for nwrite.
+ */
+static void npcm_i2c_set_fifo(struct npcm_i2c *bus, int nread, int nwrite)
+{
+ u8 rxf_ctl = 0;
+
+ if (!bus->fifo_use)
+ return;
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+ npcm_i2c_clear_tx_fifo(bus);
+ npcm_i2c_clear_rx_fifo(bus);
+
+ /* configure RX FIFO */
+ if (nread > 0) {
+ rxf_ctl = min_t(int, nread, I2C_HW_FIFO_SIZE);
+
+		/* set the LAST bit; when set, the next FIFO packet is NACKed */
+ if (nread <= I2C_HW_FIFO_SIZE)
+ rxf_ctl |= NPCM_I2CRXF_CTL_LAST_PEC;
+
+ /*
+		 * If we are about to read the first byte in block read mode,
+		 * don't NACK it. If the slave returns a zero size, the HW
+		 * can't NACK it immediately; it reads an extra byte and then
+		 * NACKs.
+ */
+ if (bus->rd_ind == 0 && bus->read_block_use) {
+ /* set fifo to read one byte, no last: */
+ rxf_ctl = 1;
+ }
+
+ /* set fifo size: */
+ iowrite8(rxf_ctl, bus->reg + NPCM_I2CRXF_CTL);
+ }
+
+ /* configure TX FIFO */
+ if (nwrite > 0) {
+ if (nwrite > I2C_HW_FIFO_SIZE)
+			/* data to send is larger than the FIFO size */
+ iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CTXF_CTL);
+ else
+ iowrite8(nwrite, bus->reg + NPCM_I2CTXF_CTL);
+
+ npcm_i2c_clear_tx_fifo(bus);
+ }
+}
+
+static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo)
+{
+ u8 data;
+
+ while (bytes_in_fifo--) {
+ data = npcm_i2c_rd_byte(bus);
+ if (bus->rd_ind < bus->rd_size)
+ bus->rd_buf[bus->rd_ind++] = data;
+ }
+}
+
+static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ /* Clear NEGACK, STASTR and BER bits */
+ val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR;
+ iowrite8(val, bus->reg + NPCM_I2CST);
+}
+
+static void npcm_i2c_master_abort(struct npcm_i2c *bus)
+{
+ /* Only current master is allowed to issue a stop condition */
+ if (!npcm_i2c_is_master(bus))
+ return;
+
+ npcm_i2c_eob_int(bus, true);
+ npcm_i2c_master_stop(bus);
+ npcm_i2c_clear_master_status(bus);
+}
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type)
+{
+ u8 slave_add;
+
+ /* select bank 0 for address 3 to 10 */
+ if (addr_type > I2C_SLAVE_ADDR2)
+ npcm_i2c_select_bank(bus, I2C_BANK_0);
+
+ slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]);
+
+ if (addr_type > I2C_SLAVE_ADDR2)
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+
+ return slave_add;
+}
+
+static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add)
+{
+ int i;
+
+ /* Set the enable bit */
+ slave_add |= 0x80;
+ npcm_i2c_select_bank(bus, I2C_BANK_0);
+ for (i = I2C_SLAVE_ADDR1; i < I2C_NUM_OWN_ADDR; i++) {
+ if (ioread8(bus->reg + npcm_i2caddr[i]) == slave_add)
+ iowrite8(0, bus->reg + npcm_i2caddr[i]);
+ }
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+ return 0;
+}
+
+static void npcm_i2c_write_fifo_slave(struct npcm_i2c *bus, u16 max_bytes)
+{
+ /*
+	 * Fill the FIFO while it is not full and there are more bytes to
+	 * write.
+ */
+ npcm_i2c_clear_fifo_int(bus);
+ npcm_i2c_clear_tx_fifo(bus);
+ iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
+ while (max_bytes-- && I2C_HW_FIFO_SIZE != npcm_i2c_fifo_usage(bus)) {
+ if (bus->slv_wr_size <= 0)
+ break;
+ bus->slv_wr_ind = bus->slv_wr_ind % I2C_HW_FIFO_SIZE;
+ npcm_i2c_wr_byte(bus, bus->slv_wr_buf[bus->slv_wr_ind]);
+ bus->slv_wr_ind++;
+ bus->slv_wr_ind = bus->slv_wr_ind % I2C_HW_FIFO_SIZE;
+ bus->slv_wr_size--;
+ }
+}
+
+static void npcm_i2c_read_fifo_slave(struct npcm_i2c *bus, u8 bytes_in_fifo)
+{
+ u8 data;
+
+ if (!bus->slave)
+ return;
+
+ while (bytes_in_fifo--) {
+ data = npcm_i2c_rd_byte(bus);
+
+ bus->slv_rd_ind = bus->slv_rd_ind % I2C_HW_FIFO_SIZE;
+ bus->slv_rd_buf[bus->slv_rd_ind] = data;
+ bus->slv_rd_ind++;
+
+ /* 1st byte is length in block protocol: */
+ if (bus->slv_rd_ind == 1 && bus->read_block_use)
+ bus->slv_rd_size = data + bus->PEC_use + 1;
+ }
+}
+
+static int npcm_i2c_slave_get_wr_buf(struct npcm_i2c *bus)
+{
+ int i;
+ u8 value;
+ int ind;
+ int ret = bus->slv_wr_ind;
+
+ /* fill a cyclic buffer */
+ for (i = 0; i < I2C_HW_FIFO_SIZE; i++) {
+ if (bus->slv_wr_size >= I2C_HW_FIFO_SIZE)
+ break;
+ i2c_slave_event(bus->slave, I2C_SLAVE_READ_REQUESTED, &value);
+ ind = (bus->slv_wr_ind + bus->slv_wr_size) % I2C_HW_FIFO_SIZE;
+ bus->slv_wr_buf[ind] = value;
+ bus->slv_wr_size++;
+ i2c_slave_event(bus->slave, I2C_SLAVE_READ_PROCESSED, &value);
+ }
+ return I2C_HW_FIFO_SIZE - ret;
+}
+
+static void npcm_i2c_slave_send_rd_buf(struct npcm_i2c *bus)
+{
+ int i;
+
+ for (i = 0; i < bus->slv_rd_ind; i++)
+ i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_RECEIVED,
+ &bus->slv_rd_buf[i]);
+ /*
+	 * Once bytes are passed up to the backend, reset the wr buffer
+	 * counters: the master sent new data (a new offset in the device),
+	 * so discard the stale wr FIFO contents.
+ */
+ if (bus->slv_rd_ind) {
+ bus->slv_wr_size = 0;
+ bus->slv_wr_ind = 0;
+ }
+
+ bus->slv_rd_ind = 0;
+ bus->slv_rd_size = bus->adap.quirks->max_read_len;
+
+ npcm_i2c_clear_fifo_int(bus);
+ npcm_i2c_clear_rx_fifo(bus);
+}
+
+static void npcm_i2c_slave_receive(struct npcm_i2c *bus, u16 nread,
+ u8 *read_data)
+{
+ bus->state = I2C_OPER_STARTED;
+ bus->operation = I2C_READ_OPER;
+ bus->slv_rd_size = nread;
+ bus->slv_rd_ind = 0;
+
+ iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
+ iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CRXF_CTL);
+ npcm_i2c_clear_tx_fifo(bus);
+ npcm_i2c_clear_rx_fifo(bus);
+}
+
+static void npcm_i2c_slave_xmit(struct npcm_i2c *bus, u16 nwrite,
+ u8 *write_data)
+{
+ if (nwrite == 0)
+ return;
+
+ bus->state = I2C_OPER_STARTED;
+ bus->operation = I2C_WRITE_OPER;
+
+ /* get the next buffer */
+ npcm_i2c_slave_get_wr_buf(bus);
+ npcm_i2c_write_fifo_slave(bus, nwrite);
+}
+
+/*
+ * npcm_i2c_slave_wr_buf_sync:
+ * Currently the slave IF only supports single-byte operations.
+ * To utilize the NPCM HW FIFO, the driver asks the backend for 16 bytes
+ * at a time, packs them into a buffer, and then transmits them all
+ * together to the FIFO and onward to the bus.
+ * A NACK on read is issued once bus->adap->quirks->max_read_len is
+ * reached; sending a NACK whenever the backend requests one is not
+ * supported.
+ * The next two functions allow reading into a local buffer before
+ * writing it all to the HW FIFO.
+ */
+static void npcm_i2c_slave_wr_buf_sync(struct npcm_i2c *bus)
+{
+ int left_in_fifo;
+
+ left_in_fifo = FIELD_GET(NPCM_I2CTXF_STS_TX_BYTES,
+ ioread8(bus->reg + NPCM_I2CTXF_STS));
+
+ /* fifo already full: */
+ if (left_in_fifo >= I2C_HW_FIFO_SIZE ||
+ bus->slv_wr_size >= I2C_HW_FIFO_SIZE)
+ return;
+
+ /* update the wr fifo index back to the untransmitted bytes: */
+ bus->slv_wr_ind = bus->slv_wr_ind - left_in_fifo;
+ bus->slv_wr_size = bus->slv_wr_size + left_in_fifo;
+
+ if (bus->slv_wr_ind < 0)
+ bus->slv_wr_ind += I2C_HW_FIFO_SIZE;
+}
+
+static void npcm_i2c_slave_rd_wr(struct npcm_i2c *bus)
+{
+ if (NPCM_I2CST_XMIT & ioread8(bus->reg + NPCM_I2CST)) {
+ /*
+ * Slave got an address match with direction bit 1 so it should
+		 * transmit data. Write until the master NACKs.
+ */
+ bus->operation = I2C_WRITE_OPER;
+ npcm_i2c_slave_xmit(bus, bus->adap.quirks->max_write_len,
+ bus->slv_wr_buf);
+ } else {
+ /*
+ * Slave got an address match with direction bit 0 so it should
+ * receive data.
+		 * This module does not support refusing bytes; it will
+		 * always ACK.
+ */
+ bus->operation = I2C_READ_OPER;
+ npcm_i2c_read_fifo_slave(bus, npcm_i2c_fifo_usage(bus));
+ bus->stop_ind = I2C_SLAVE_RCV_IND;
+ npcm_i2c_slave_send_rd_buf(bus);
+ npcm_i2c_slave_receive(bus, bus->adap.quirks->max_read_len,
+ bus->slv_rd_buf);
+ }
+}
+
+static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus)
+{
+ u8 val;
+ irqreturn_t ret = IRQ_NONE;
+ u8 i2cst = ioread8(bus->reg + NPCM_I2CST);
+
+ /* Slave: A NACK has occurred */
+ if (NPCM_I2CST_NEGACK & i2cst) {
+ bus->stop_ind = I2C_NACK_IND;
+ npcm_i2c_slave_wr_buf_sync(bus);
+ if (bus->fifo_use)
+ /* clear the FIFO */
+ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO,
+ bus->reg + NPCM_I2CFIF_CTS);
+
+ /* In slave write, NACK is OK, otherwise it is a problem */
+ bus->stop_ind = I2C_NO_STATUS_IND;
+ bus->operation = I2C_NO_OPER;
+ bus->own_slave_addr = 0xFF;
+
+ /*
+ * Slave has to wait for STOP to decide this is the end
+ * of the transaction. tx is not yet considered as done
+ */
+ iowrite8(NPCM_I2CST_NEGACK, bus->reg + NPCM_I2CST);
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* Slave mode: a Bus Error (BER) has been identified */
+ if (NPCM_I2CST_BER & i2cst) {
+		/*
+		 * Check whether this is an arbitration loss or a Start/Stop
+		 * during data transfer; an arbitration problem should not
+		 * result in recovery.
+		 */
+ bus->stop_ind = I2C_BUS_ERR_IND;
+
+ /* wait for bus busy before clear fifo */
+ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS);
+
+ bus->state = I2C_IDLE;
+
+		/*
+		 * In the BER case we might get two interrupts: one for the
+		 * slave and one for the master (on a channel which switches
+		 * between master and slave).
+		 */
+		if (!completion_done(&bus->cmd_complete)) {
+ bus->cmd_err = -EIO;
+ complete(&bus->cmd_complete);
+ }
+ bus->own_slave_addr = 0xFF;
+ iowrite8(NPCM_I2CST_BER, bus->reg + NPCM_I2CST);
+ ret = IRQ_HANDLED;
+ }
+
+ /* A Slave Stop Condition has been identified */
+ if (NPCM_I2CST_SLVSTP & i2cst) {
+ u8 bytes_in_fifo = npcm_i2c_fifo_usage(bus);
+
+ bus->stop_ind = I2C_SLAVE_DONE_IND;
+
+ if (bus->operation == I2C_READ_OPER)
+ npcm_i2c_read_fifo_slave(bus, bytes_in_fifo);
+
+ /* if the buffer is empty nothing will be sent */
+ npcm_i2c_slave_send_rd_buf(bus);
+
+ /* Slave done transmitting or receiving */
+ bus->stop_ind = I2C_NO_STATUS_IND;
+
+		/*
+		 * Note: reaching this point does not mean we throw away the
+		 * wr buffer; we keep it until the next received offset.
+		 */
+ bus->operation = I2C_NO_OPER;
+ bus->own_slave_addr = 0xFF;
+ i2c_slave_event(bus->slave, I2C_SLAVE_STOP, 0);
+ iowrite8(NPCM_I2CST_SLVSTP, bus->reg + NPCM_I2CST);
+ if (bus->fifo_use) {
+ npcm_i2c_clear_fifo_int(bus);
+ npcm_i2c_clear_rx_fifo(bus);
+ npcm_i2c_clear_tx_fifo(bus);
+
+ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO,
+ bus->reg + NPCM_I2CFIF_CTS);
+ }
+ bus->state = I2C_IDLE;
+ ret = IRQ_HANDLED;
+ }
+
+ /* restart condition occurred and Rx-FIFO was not empty */
+ if (bus->fifo_use && FIELD_GET(NPCM_I2CFIF_CTS_SLVRSTR,
+ ioread8(bus->reg + NPCM_I2CFIF_CTS))) {
+ bus->stop_ind = I2C_SLAVE_RESTART_IND;
+ bus->master_or_slave = I2C_SLAVE;
+ if (bus->operation == I2C_READ_OPER)
+ npcm_i2c_read_fifo_slave(bus, npcm_i2c_fifo_usage(bus));
+ bus->operation = I2C_WRITE_OPER;
+ iowrite8(0, bus->reg + NPCM_I2CRXF_CTL);
+ val = NPCM_I2CFIF_CTS_CLR_FIFO | NPCM_I2CFIF_CTS_SLVRSTR |
+ NPCM_I2CFIF_CTS_RXF_TXE;
+ iowrite8(val, bus->reg + NPCM_I2CFIF_CTS);
+ npcm_i2c_slave_rd_wr(bus);
+ ret = IRQ_HANDLED;
+ }
+
+ /* A Slave Address Match has been identified */
+ if (NPCM_I2CST_NMATCH & i2cst) {
+ u8 info = 0;
+
+ /* Address match automatically implies slave mode */
+ bus->master_or_slave = I2C_SLAVE;
+ npcm_i2c_clear_fifo_int(bus);
+ npcm_i2c_clear_rx_fifo(bus);
+ npcm_i2c_clear_tx_fifo(bus);
+ iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
+ iowrite8(I2C_HW_FIFO_SIZE, bus->reg + NPCM_I2CRXF_CTL);
+ if (NPCM_I2CST_XMIT & i2cst) {
+ bus->operation = I2C_WRITE_OPER;
+ } else {
+ i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_REQUESTED,
+ &info);
+ bus->operation = I2C_READ_OPER;
+ }
+ if (bus->own_slave_addr == 0xFF) {
+ /* Check which type of address match */
+ val = ioread8(bus->reg + NPCM_I2CCST);
+ if (NPCM_I2CCST_MATCH & val) {
+ u16 addr;
+ enum i2c_addr eaddr;
+ u8 i2ccst2;
+ u8 i2ccst3;
+
+ i2ccst3 = ioread8(bus->reg + NPCM_I2CCST3);
+ i2ccst2 = ioread8(bus->reg + NPCM_I2CCST2);
+
+				/*
+				 * The i2c module can respond to 10 own slave
+				 * addresses. Check which one was addressed by
+				 * the master and respond to the first match.
+				 */
+ addr = ((i2ccst3 & 0x07) << 7) |
+ (i2ccst2 & 0x7F);
+ info = ffs(addr);
+ eaddr = (enum i2c_addr)info;
+ addr = npcm_i2c_get_slave_addr(bus, eaddr);
+ addr &= 0x7F;
+ bus->own_slave_addr = addr;
+ if (bus->PEC_mask & BIT(info))
+ bus->PEC_use = true;
+ else
+ bus->PEC_use = false;
+ } else {
+ if (NPCM_I2CCST_GCMATCH & val)
+ bus->own_slave_addr = 0;
+ if (NPCM_I2CCST_ARPMATCH & val)
+ bus->own_slave_addr = 0x61;
+ }
+ } else {
+			/*
+			 * A slave match can happen in one of three cases:
+			 * 1. Start, SA, read (slave read without further ado)
+			 * 2. Start, SA, read, data, restart, SA, read, ...
+			 *    (slave read in fragmented mode)
+			 * 3. Start, SA, write, data, restart, SA, read, ...
+			 *    (regular write-read mode)
+			 */
+ if ((bus->state == I2C_OPER_STARTED &&
+ bus->operation == I2C_READ_OPER &&
+ bus->stop_ind == I2C_SLAVE_XMIT_IND) ||
+ bus->stop_ind == I2C_SLAVE_RCV_IND) {
+ /* slave tx after slave rx w/o STOP */
+ bus->stop_ind = I2C_SLAVE_RESTART_IND;
+ }
+ }
+
+ if (NPCM_I2CST_XMIT & i2cst)
+ bus->stop_ind = I2C_SLAVE_XMIT_IND;
+ else
+ bus->stop_ind = I2C_SLAVE_RCV_IND;
+ bus->state = I2C_SLAVE_MATCH;
+ npcm_i2c_slave_rd_wr(bus);
+ iowrite8(NPCM_I2CST_NMATCH, bus->reg + NPCM_I2CST);
+ ret = IRQ_HANDLED;
+ }
+
+ /* Slave SDA status is set - tx or rx */
+ if ((NPCM_I2CST_SDAST & i2cst) ||
+ (bus->fifo_use &&
+ (npcm_i2c_tx_fifo_empty(bus) || npcm_i2c_rx_fifo_full(bus)))) {
+ npcm_i2c_slave_rd_wr(bus);
+ iowrite8(NPCM_I2CST_SDAST, bus->reg + NPCM_I2CST);
+ ret = IRQ_HANDLED;
+ } /* SDAST */
+
+ return ret;
+}
+
+static int npcm_i2c_reg_slave(struct i2c_client *client)
+{
+ unsigned long lock_flags;
+ struct npcm_i2c *bus = i2c_get_adapdata(client->adapter);
+
+ bus->slave = client;
+
+ if (!bus->slave)
+ return -EINVAL;
+
+ if (client->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ spin_lock_irqsave(&bus->lock, lock_flags);
+
+ npcm_i2c_init_params(bus);
+ bus->slv_rd_size = 0;
+ bus->slv_wr_size = 0;
+ bus->slv_rd_ind = 0;
+ bus->slv_wr_ind = 0;
+ if (client->flags & I2C_CLIENT_PEC)
+ bus->PEC_use = true;
+
+ dev_info(bus->dev, "i2c%d register slave SA=0x%x, PEC=%d\n", bus->num,
+ client->addr, bus->PEC_use);
+
+ npcm_i2c_slave_enable(bus, I2C_SLAVE_ADDR1, client->addr, true);
+ npcm_i2c_clear_fifo_int(bus);
+ npcm_i2c_clear_rx_fifo(bus);
+ npcm_i2c_clear_tx_fifo(bus);
+ npcm_i2c_slave_int_enable(bus, true);
+
+ spin_unlock_irqrestore(&bus->lock, lock_flags);
+ return 0;
+}
+
+static int npcm_i2c_unreg_slave(struct i2c_client *client)
+{
+ struct npcm_i2c *bus = client->adapter->algo_data;
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&bus->lock, lock_flags);
+ if (!bus->slave) {
+ spin_unlock_irqrestore(&bus->lock, lock_flags);
+ return -EINVAL;
+ }
+ npcm_i2c_slave_int_enable(bus, false);
+ npcm_i2c_remove_slave_addr(bus, client->addr);
+ bus->slave = NULL;
+ spin_unlock_irqrestore(&bus->lock, lock_flags);
+ return 0;
+}
+#endif /* CONFIG_I2C_SLAVE */
+
+static void npcm_i2c_master_fifo_read(struct npcm_i2c *bus)
+{
+ int rcount;
+ int fifo_bytes;
+ enum i2c_state_ind ind = I2C_MASTER_DONE_IND;
+
+ fifo_bytes = npcm_i2c_fifo_usage(bus);
+ rcount = bus->rd_size - bus->rd_ind;
+
+	/*
+	 * In order not to change RX_TRH during the transaction (we found that
+	 * this might be problematic if it takes too much time to read the
+	 * FIFO), we read the data in the following way: if the number of
+	 * bytes to read == FIFO size + C (where C < FIFO size), first read C
+	 * bytes, and in the next interrupt read the rest of the data.
+	 */
+ if (rcount < (2 * I2C_HW_FIFO_SIZE) && rcount > I2C_HW_FIFO_SIZE)
+ fifo_bytes = rcount - I2C_HW_FIFO_SIZE;
+
+ if (rcount <= fifo_bytes) {
+ /* last bytes are about to be read - end of tx */
+ bus->state = I2C_STOP_PENDING;
+ bus->stop_ind = ind;
+ npcm_i2c_eob_int(bus, true);
+ /* Stop should be set before reading last byte. */
+ npcm_i2c_master_stop(bus);
+ npcm_i2c_read_fifo(bus, fifo_bytes);
+ } else {
+ npcm_i2c_read_fifo(bus, fifo_bytes);
+ rcount = bus->rd_size - bus->rd_ind;
+ npcm_i2c_set_fifo(bus, rcount, -1);
+ }
+}
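+
+/*
+ * Illustrative note (not part of the driver logic): for rd_size = 20 and
+ * a 16-byte FIFO, rcount = 20 lies between one and two FIFO sizes, so the
+ * first pass above reads only C = 20 - 16 = 4 bytes; the remaining 16 are
+ * read on the next interrupt without touching RX_TRH mid-transaction.
+ */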
+
+static void npcm_i2c_irq_master_handler_write(struct npcm_i2c *bus)
+{
+ u16 wcount;
+
+ if (bus->fifo_use)
+ npcm_i2c_clear_tx_fifo(bus); /* clear the TX fifo status bit */
+
+ /* Master write operation - last byte handling */
+ if (bus->wr_ind == bus->wr_size) {
+ if (bus->fifo_use && npcm_i2c_fifo_usage(bus) > 0)
+			/*
+			 * No more bytes to send (to add to the FIFO), but the
+			 * FIFO is not empty yet: it is still in the middle of
+			 * tx. There is nothing to do except wait for the end
+			 * of the tx; we will get an interrupt when the FIFO
+			 * empties.
+			 */
+ return;
+
+ if (bus->rd_size == 0) {
+ /* all bytes have been written, in wr only operation */
+ npcm_i2c_eob_int(bus, true);
+ bus->state = I2C_STOP_PENDING;
+ bus->stop_ind = I2C_MASTER_DONE_IND;
+ npcm_i2c_master_stop(bus);
+ /* Clear SDA Status bit (by writing dummy byte) */
+ npcm_i2c_wr_byte(bus, 0xFF);
+
+ } else {
+ /* last write-byte written on previous int - restart */
+ npcm_i2c_set_fifo(bus, bus->rd_size, -1);
+ /* Generate repeated start upon next write to SDA */
+ npcm_i2c_master_start(bus);
+
+			/*
+			 * Receiving one byte only - stall after successful
+			 * completion of sending the address byte. If we NACK
+			 * here, and the slave doesn't ACK the address, we
+			 * might unintentionally NACK the next multi-byte
+			 * read.
+			 */
+ if (bus->rd_size == 1)
+ npcm_i2c_stall_after_start(bus, true);
+
+ /* Next int will occur on read */
+ bus->operation = I2C_READ_OPER;
+ /* send the slave address in read direction */
+ npcm_i2c_wr_byte(bus, bus->dest_addr | 0x1);
+ }
+ } else {
+ /* write next byte not last byte and not slave address */
+ if (!bus->fifo_use || bus->wr_size == 1) {
+ npcm_i2c_wr_byte(bus, bus->wr_buf[bus->wr_ind++]);
+ } else {
+ wcount = bus->wr_size - bus->wr_ind;
+ npcm_i2c_set_fifo(bus, -1, wcount);
+ if (wcount)
+ npcm_i2c_write_to_fifo_master(bus, wcount);
+ }
+ }
+}
+
+static void npcm_i2c_irq_master_handler_read(struct npcm_i2c *bus)
+{
+ u16 block_extra_bytes_size;
+ u8 data;
+
+ /* added bytes to the packet: */
+ block_extra_bytes_size = bus->read_block_use + bus->PEC_use;
+
+	/*
+	 * Perform master read, distinguishing between the last byte and the
+	 * rest of the bytes. The last byte should be read when the clock is
+	 * stopped.
+	 */
+ if (bus->rd_ind == 0) { /* first byte handling: */
+ if (bus->read_block_use) {
+ /* first byte in block protocol is the size: */
+ data = npcm_i2c_rd_byte(bus);
+ data = clamp_val(data, 1, I2C_SMBUS_BLOCK_MAX);
+ bus->rd_size = data + block_extra_bytes_size;
+ bus->rd_buf[bus->rd_ind++] = data;
+
+ /* clear RX FIFO interrupt status: */
+ if (bus->fifo_use) {
+ data = ioread8(bus->reg + NPCM_I2CFIF_CTS);
+ data = data | NPCM_I2CFIF_CTS_RXF_TXE;
+ iowrite8(data, bus->reg + NPCM_I2CFIF_CTS);
+ }
+
+ npcm_i2c_set_fifo(bus, bus->rd_size - 1, -1);
+ npcm_i2c_stall_after_start(bus, false);
+ } else {
+ npcm_i2c_clear_tx_fifo(bus);
+ npcm_i2c_master_fifo_read(bus);
+ }
+ } else {
+ if (bus->rd_size == block_extra_bytes_size &&
+ bus->read_block_use) {
+ bus->state = I2C_STOP_PENDING;
+ bus->stop_ind = I2C_BLOCK_BYTES_ERR_IND;
+ bus->cmd_err = -EIO;
+ npcm_i2c_eob_int(bus, true);
+ npcm_i2c_master_stop(bus);
+ npcm_i2c_read_fifo(bus, npcm_i2c_fifo_usage(bus));
+ } else {
+ npcm_i2c_master_fifo_read(bus);
+ }
+ }
+}
+
+static void npcm_i2c_irq_handle_nmatch(struct npcm_i2c *bus)
+{
+ iowrite8(NPCM_I2CST_NMATCH, bus->reg + NPCM_I2CST);
+ npcm_i2c_nack(bus);
+ bus->stop_ind = I2C_BUS_ERR_IND;
+ npcm_i2c_callback(bus, bus->stop_ind, npcm_i2c_get_index(bus));
+}
+
+/* A NACK has occurred */
+static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus)
+{
+ u8 val;
+
+ if (bus->nack_cnt < ULLONG_MAX)
+ bus->nack_cnt++;
+
+ if (bus->fifo_use) {
+ /*
+ * if there are still untransmitted bytes in TX FIFO
+ * reduce them from wr_ind
+ */
+ if (bus->operation == I2C_WRITE_OPER)
+ bus->wr_ind -= npcm_i2c_fifo_usage(bus);
+
+ /* clear the FIFO */
+ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS);
+ }
+
+ /* In master write operation, got unexpected NACK */
+ bus->stop_ind = I2C_NACK_IND;
+ /* Only current master is allowed to issue Stop Condition */
+ if (npcm_i2c_is_master(bus)) {
+ /* stopping in the middle */
+ npcm_i2c_eob_int(bus, false);
+ npcm_i2c_master_stop(bus);
+
+ /*
+ * The bus is released from stall only after the SW clears
+ * NEGACK bit. Then a Stop condition is sent.
+ */
+ npcm_i2c_clear_master_status(bus);
+ readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val,
+ !(val & NPCM_I2CCST_BUSY), 10, 200);
+ }
+ bus->state = I2C_IDLE;
+
+ /*
+ * In Master mode, NACK should be cleared only after STOP.
+ * In such case, the bus is released from stall only after the
+ * software clears NACK bit. Then a Stop condition is sent.
+ */
+ npcm_i2c_callback(bus, bus->stop_ind, bus->wr_ind);
+}
+
+/* Master mode: a Bus Error has been identified */
+static void npcm_i2c_irq_handle_ber(struct npcm_i2c *bus)
+{
+ if (bus->ber_cnt < ULLONG_MAX)
+ bus->ber_cnt++;
+ bus->stop_ind = I2C_BUS_ERR_IND;
+ if (npcm_i2c_is_master(bus)) {
+ npcm_i2c_master_abort(bus);
+ } else {
+ npcm_i2c_clear_master_status(bus);
+
+ /* Clear BB (BUS BUSY) bit */
+ iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
+
+ bus->cmd_err = -EAGAIN;
+ npcm_i2c_callback(bus, bus->stop_ind, npcm_i2c_get_index(bus));
+ }
+ bus->state = I2C_IDLE;
+}
+
+/* EOB: a master End Of Busy (meaning STOP completed) */
+static void npcm_i2c_irq_handle_eob(struct npcm_i2c *bus)
+{
+ npcm_i2c_eob_int(bus, false);
+ bus->state = I2C_IDLE;
+ npcm_i2c_callback(bus, bus->stop_ind, bus->rd_ind);
+}
+
+/* Address sent and requested stall occurred (Master mode) */
+static void npcm_i2c_irq_handle_stall_after_start(struct npcm_i2c *bus)
+{
+ if (npcm_i2c_is_quick(bus)) {
+ bus->state = I2C_STOP_PENDING;
+ bus->stop_ind = I2C_MASTER_DONE_IND;
+ npcm_i2c_eob_int(bus, true);
+ npcm_i2c_master_stop(bus);
+ } else if ((bus->rd_size == 1) && !bus->read_block_use) {
+ /*
+ * Receiving one byte only - set NACK after ensuring
+ * slave ACKed the address byte.
+ */
+ npcm_i2c_nack(bus);
+ }
+
+ /* Reset stall-after-address-byte */
+ npcm_i2c_stall_after_start(bus, false);
+
+ /* Clear stall only after setting STOP */
+ iowrite8(NPCM_I2CST_STASTR, bus->reg + NPCM_I2CST);
+}
+
+/* SDA status is set - TX or RX, master */
+static void npcm_i2c_irq_handle_sda(struct npcm_i2c *bus, u8 i2cst)
+{
+ u8 fif_cts;
+
+ if (!npcm_i2c_is_master(bus))
+ return;
+
+ if (bus->state == I2C_IDLE) {
+ bus->stop_ind = I2C_WAKE_UP_IND;
+
+ if (npcm_i2c_is_quick(bus) || bus->read_block_use)
+ /*
+ * Need to stall after successful
+ * completion of sending address byte
+ */
+ npcm_i2c_stall_after_start(bus, true);
+ else
+ npcm_i2c_stall_after_start(bus, false);
+
+		/*
+		 * Receiving one byte only - stall after successful completion
+		 * of sending the address byte. If we NACK here and the slave
+		 * doesn't ACK the address, we might unintentionally NACK the
+		 * next multi-byte read.
+		 */
+ if (bus->wr_size == 0 && bus->rd_size == 1)
+ npcm_i2c_stall_after_start(bus, true);
+
+ /* Initiate I2C master tx */
+
+ /* select bank 1 for FIFO regs */
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+
+ fif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS);
+ fif_cts = fif_cts & ~NPCM_I2CFIF_CTS_SLVRSTR;
+
+ /* clear FIFO and relevant status bits. */
+ fif_cts = fif_cts | NPCM_I2CFIF_CTS_CLR_FIFO;
+ iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS);
+
+ /* re-enable */
+ fif_cts = fif_cts | NPCM_I2CFIF_CTS_RXF_TXE;
+ iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS);
+
+		/*
+		 * Configure the FIFO threshold according to the needed number
+		 * of bytes to read.
+		 * Note: due to a HW limitation the rx FIFO cannot be
+		 * configured before the restart got an ACK. The LAST bit will
+		 * not be reset unless RX completed; it will stay set on the
+		 * next tx.
+		 */
+ if (bus->wr_size)
+ npcm_i2c_set_fifo(bus, -1, bus->wr_size);
+ else
+ npcm_i2c_set_fifo(bus, bus->rd_size, -1);
+
+ bus->state = I2C_OPER_STARTED;
+
+ if (npcm_i2c_is_quick(bus) || bus->wr_size)
+ npcm_i2c_wr_byte(bus, bus->dest_addr);
+ else
+ npcm_i2c_wr_byte(bus, bus->dest_addr | BIT(0));
+		/* SDA interrupt, after start/restart */
+ } else {
+ if (NPCM_I2CST_XMIT & i2cst) {
+ bus->operation = I2C_WRITE_OPER;
+ npcm_i2c_irq_master_handler_write(bus);
+ } else {
+ bus->operation = I2C_READ_OPER;
+ npcm_i2c_irq_master_handler_read(bus);
+ }
+ }
+}
+
+static int npcm_i2c_int_master_handler(struct npcm_i2c *bus)
+{
+ u8 i2cst;
+ int ret = -EIO;
+
+ i2cst = ioread8(bus->reg + NPCM_I2CST);
+
+ if (FIELD_GET(NPCM_I2CST_NMATCH, i2cst)) {
+ npcm_i2c_irq_handle_nmatch(bus);
+ return 0;
+ }
+ /* A NACK has occurred */
+ if (FIELD_GET(NPCM_I2CST_NEGACK, i2cst)) {
+ npcm_i2c_irq_handle_nack(bus);
+ return 0;
+ }
+
+ /* Master mode: a Bus Error has been identified */
+ if (FIELD_GET(NPCM_I2CST_BER, i2cst)) {
+ npcm_i2c_irq_handle_ber(bus);
+ return 0;
+ }
+
+ /* EOB: a master End Of Busy (meaning STOP completed) */
+ if ((FIELD_GET(NPCM_I2CCTL1_EOBINTE,
+ ioread8(bus->reg + NPCM_I2CCTL1)) == 1) &&
+ (FIELD_GET(NPCM_I2CCST3_EO_BUSY,
+ ioread8(bus->reg + NPCM_I2CCST3)))) {
+ npcm_i2c_irq_handle_eob(bus);
+ return 0;
+ }
+
+ /* Address sent and requested stall occurred (Master mode) */
+ if (FIELD_GET(NPCM_I2CST_STASTR, i2cst)) {
+ npcm_i2c_irq_handle_stall_after_start(bus);
+ ret = 0;
+ }
+
+ /* SDA status is set - TX or RX, master */
+ if (FIELD_GET(NPCM_I2CST_SDAST, i2cst) ||
+ (bus->fifo_use &&
+ (npcm_i2c_tx_fifo_empty(bus) || npcm_i2c_rx_fifo_full(bus)))) {
+ npcm_i2c_irq_handle_sda(bus, i2cst);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* recovery using TGCLK functionality of the module */
+static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap)
+{
+ u8 val;
+ u8 fif_cts;
+ bool done = false;
+ int status = -ENOTRECOVERABLE;
+ struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
+ /* Allow 3 bytes (27 toggles) to be read from the slave: */
+ int iter = 27;
+
+ if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) {
+ dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck",
+ bus->num);
+ npcm_i2c_reset(bus);
+ return status;
+ }
+
+ npcm_i2c_int_enable(bus, false);
+ npcm_i2c_disable(bus);
+ npcm_i2c_enable(bus);
+ iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
+ npcm_i2c_clear_tx_fifo(bus);
+ npcm_i2c_clear_rx_fifo(bus);
+ iowrite8(0, bus->reg + NPCM_I2CRXF_CTL);
+ iowrite8(0, bus->reg + NPCM_I2CTXF_CTL);
+ npcm_i2c_stall_after_start(bus, false);
+
+ /* select bank 1 for FIFO regs */
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+
+ /* clear FIFO and relevant status bits. */
+ fif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS);
+ fif_cts &= ~NPCM_I2CFIF_CTS_SLVRSTR;
+ fif_cts |= NPCM_I2CFIF_CTS_CLR_FIFO;
+ iowrite8(fif_cts, bus->reg + NPCM_I2CFIF_CTS);
+ npcm_i2c_set_fifo(bus, -1, 0);
+
+ /* Repeat the following sequence until SDA is released */
+ do {
+ /* Issue a single SCL toggle */
+ iowrite8(NPCM_I2CCST_TGSCL, bus->reg + NPCM_I2CCST);
+ usleep_range(20, 30);
+ /* If SDA line is inactive (high), stop */
+ if (npcm_i2c_get_SDA(_adap)) {
+ done = true;
+ status = 0;
+ }
+ } while (!done && iter--);
+
+ /* If SDA line is released: send start-addr-stop, to re-sync. */
+ if (npcm_i2c_get_SDA(_adap)) {
+ /* Send an address byte in write direction: */
+ npcm_i2c_wr_byte(bus, bus->dest_addr);
+ npcm_i2c_master_start(bus);
+ /* Wait until START condition is sent */
+ status = readx_poll_timeout(npcm_i2c_get_SCL, _adap, val, !val,
+ 20, 200);
+ /* If START condition was sent */
+ if (npcm_i2c_is_master(bus) > 0) {
+ usleep_range(20, 30);
+ npcm_i2c_master_stop(bus);
+ usleep_range(200, 500);
+ }
+ }
+ npcm_i2c_reset(bus);
+ npcm_i2c_int_enable(bus, true);
+
+ if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1))
+ status = 0;
+ else
+ status = -ENOTRECOVERABLE;
+ if (status) {
+ if (bus->rec_fail_cnt < ULLONG_MAX)
+ bus->rec_fail_cnt++;
+ } else {
+ if (bus->rec_succ_cnt < ULLONG_MAX)
+ bus->rec_succ_cnt++;
+ }
+ return status;
+}
+
+/* recovery init: register the TGCLK-based recovery callback */
+static void npcm_i2c_recovery_init(struct i2c_adapter *_adap)
+{
+ struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap);
+ struct i2c_bus_recovery_info *rinfo = &bus->rinfo;
+
+ rinfo->recover_bus = npcm_i2c_recovery_tgclk;
+
+	/*
+	 * The npcm i2c HW allows direct reading of SCL and SDA.
+	 * However, it does not support setting SCL and SDA directly.
+	 * The recovery function can toggle SCL when SDA is low (but cannot
+	 * set it). The getter functions are used internally, and can also
+	 * be used externally.
+	 */
+ rinfo->get_scl = npcm_i2c_get_SCL;
+ rinfo->get_sda = npcm_i2c_get_SDA;
+ _adap->bus_recovery_info = rinfo;
+}
+
+/* SCLFRQ min/max field values */
+#define SCLFRQ_MIN 10
+#define SCLFRQ_MAX 511
+#define clk_coef(freq, mul) DIV_ROUND_UP((freq) * (mul), 1000000)
+
+/*
+ * npcm_i2c_init_clk: init HW timing parameters.
+ * NPCM7XX i2c module timing parameters are dependent on the module core
+ * clock (APB) and the bus frequency.
+ * A 100kHz bus requires tSCL = 4 * SCLFRQ * tCLK; LT and HT are symmetric.
+ * A 400kHz bus requires asymmetric HT and LT. A different equation is
+ * recommended by the HW designer, given the core clock range (equations in
+ * the comments below).
+ */
+static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz)
+{
+ u32 k1 = 0;
+ u32 k2 = 0;
+ u8 dbnct = 0;
+ u32 sclfrq = 0;
+ u8 hldt = 7;
+ u8 fast_mode = 0;
+ u32 src_clk_khz;
+ u32 bus_freq_khz;
+
+ src_clk_khz = bus->apb_clk / 1000;
+ bus_freq_khz = bus_freq_hz / 1000;
+ bus->bus_freq = bus_freq_hz;
+
+ /* 100KHz and below: */
+ if (bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) {
+ sclfrq = src_clk_khz / (bus_freq_khz * 4);
+
+ if (sclfrq < SCLFRQ_MIN || sclfrq > SCLFRQ_MAX)
+ return -EDOM;
+
+ if (src_clk_khz >= 40000)
+ hldt = 17;
+ else if (src_clk_khz >= 12500)
+ hldt = 15;
+ else
+ hldt = 7;
+ }
+
+ /* 400KHz: */
+ else if (bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) {
+ sclfrq = 0;
+ fast_mode = I2CCTL3_400K_MODE;
+
+ if (src_clk_khz < 7500)
+ /* 400KHZ cannot be supported for core clock < 7.5MHz */
+ return -EDOM;
+
+ else if (src_clk_khz >= 50000) {
+ k1 = 80;
+ k2 = 48;
+ hldt = 12;
+ dbnct = 7;
+ }
+
+ /* Master or Slave with frequency > 25MHz */
+ else if (src_clk_khz > 25000) {
+ hldt = clk_coef(src_clk_khz, 300) + 7;
+ k1 = clk_coef(src_clk_khz, 1600);
+ k2 = clk_coef(src_clk_khz, 900);
+ }
+ }
+
+ /* 1MHz: */
+ else if (bus_freq_hz <= I2C_MAX_FAST_MODE_PLUS_FREQ) {
+ sclfrq = 0;
+ fast_mode = I2CCTL3_400K_MODE;
+
+ /* 1MHZ cannot be supported for core clock < 24 MHz */
+ if (src_clk_khz < 24000)
+ return -EDOM;
+
+ k1 = clk_coef(src_clk_khz, 620);
+ k2 = clk_coef(src_clk_khz, 380);
+
+ /* Core clk > 40 MHz */
+ if (src_clk_khz > 40000) {
+ /*
+ * Set HLDT:
+ * SDA hold time: (HLDT-7) * T(CLK) >= 120
+ * HLDT = 120/T(CLK) + 7 = 120 * FREQ(CLK) + 7
+ */
+ hldt = clk_coef(src_clk_khz, 120) + 7;
+ } else {
+ hldt = 7;
+ dbnct = 2;
+ }
+ }
+
+ /* Frequency larger than 1 MHz is not supported */
+ else
+ return -EINVAL;
+
+ if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) {
+ k1 = round_up(k1, 2);
+ k2 = round_up(k2 + 1, 2);
+ if (k1 < SCLFRQ_MIN || k1 > SCLFRQ_MAX ||
+ k2 < SCLFRQ_MIN || k2 > SCLFRQ_MAX)
+ return -EDOM;
+ }
+
+ /* write sclfrq value. bits [6:0] are in I2CCTL2 reg */
+ iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, sclfrq & 0x7F),
+ bus->reg + NPCM_I2CCTL2);
+
+ /* bits [8:7] are in I2CCTL3 reg */
+ iowrite8(fast_mode | FIELD_PREP(I2CCTL3_SCLFRQ8_7, (sclfrq >> 7) & 0x3),
+ bus->reg + NPCM_I2CCTL3);
+
+ /* Select Bank 0 to access NPCM_I2CCTL4/NPCM_I2CCTL5 */
+ npcm_i2c_select_bank(bus, I2C_BANK_0);
+
+ if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) {
+		/*
+		 * Set SCL Low/High Time:
+		 * k1 = 2 * SCLLT7-0 -> Low Time = k1 / 2
+		 * k2 = 2 * SCLHT7-0 -> High Time = k2 / 2
+		 */
+ iowrite8(k1 / 2, bus->reg + NPCM_I2CSCLLT);
+ iowrite8(k2 / 2, bus->reg + NPCM_I2CSCLHT);
+
+ iowrite8(dbnct, bus->reg + NPCM_I2CCTL5);
+ }
+
+ iowrite8(hldt, bus->reg + NPCM_I2CCTL4);
+
+ /* Return to Bank 1, and stay there by default: */
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+
+ return 0;
+}
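+
+/*
+ * Worked example (illustrative only), assuming a 50 MHz APB clock:
+ * - 100 kHz bus: sclfrq = 50000 / (100 * 4) = 125, within [10, 511], and
+ *   hldt = 17 since src_clk_khz >= 40000.
+ * - 400 kHz bus: src_clk_khz >= 50000, so the fixed constants apply:
+ *   k1 = 80 and k2 = 48; after rounding (k2 + 1 rounded up to an even
+ *   value) NPCM_I2CSCLLT = 80 / 2 = 40 and NPCM_I2CSCLHT = 50 / 2 = 25.
+ * clk_coef() itself is a scaled round-up, e.g. clk_coef(50000, 300) =
+ * DIV_ROUND_UP(50000 * 300, 1000000) = 15.
+ */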
+
+static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
+ u32 bus_freq_hz)
+{
+ u8 val;
+ int ret;
+
+ /* Check whether module already enabled or frequency is out of bounds */
+ if ((bus->state != I2C_DISABLE && bus->state != I2C_IDLE) ||
+ bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ)
+ return -EINVAL;
+
+ npcm_i2c_disable(bus);
+
+ /* Configure FIFO mode : */
+ if (FIELD_GET(I2C_VER_FIFO_EN, ioread8(bus->reg + I2C_VER))) {
+ bus->fifo_use = true;
+ npcm_i2c_select_bank(bus, I2C_BANK_0);
+ val = ioread8(bus->reg + NPCM_I2CFIF_CTL);
+ val |= NPCM_I2CFIF_CTL_FIFO_EN;
+ iowrite8(val, bus->reg + NPCM_I2CFIF_CTL);
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+ } else {
+ bus->fifo_use = false;
+ }
+
+ /* Configure I2C module clock frequency */
+ ret = npcm_i2c_init_clk(bus, bus_freq_hz);
+ if (ret) {
+ dev_err(bus->dev, "npcm_i2c_init_clk failed\n");
+ return ret;
+ }
+
+ /* Enable module (before configuring CTL1) */
+ npcm_i2c_enable(bus);
+ bus->state = I2C_IDLE;
+ val = ioread8(bus->reg + NPCM_I2CCTL1);
+ val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS;
+ iowrite8(val, bus->reg + NPCM_I2CCTL1);
+
+ npcm_i2c_int_enable(bus, true);
+
+ npcm_i2c_reset(bus);
+
+ return 0;
+}
+
+static int __npcm_i2c_init(struct npcm_i2c *bus, struct platform_device *pdev)
+{
+ u32 clk_freq_hz;
+ int ret;
+
+ /* Initialize the internal data structures */
+ bus->state = I2C_DISABLE;
+ bus->master_or_slave = I2C_SLAVE;
+ bus->int_time_stamp = 0;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ bus->slave = NULL;
+#endif
+
+ ret = device_property_read_u32(&pdev->dev, "clock-frequency",
+ &clk_freq_hz);
+ if (ret) {
+		dev_info(&pdev->dev, "Could not read clock-frequency property\n");
+ clk_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ }
+
+ ret = npcm_i2c_init_module(bus, I2C_MASTER, clk_freq_hz);
+ if (ret) {
+ dev_err(&pdev->dev, "npcm_i2c_init_module failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
+{
+ struct npcm_i2c *bus = dev_id;
+
+ if (npcm_i2c_is_master(bus))
+ bus->master_or_slave = I2C_MASTER;
+
+ if (bus->master_or_slave == I2C_MASTER) {
+ bus->int_time_stamp = jiffies;
+ if (!npcm_i2c_int_master_handler(bus))
+ return IRQ_HANDLED;
+ }
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ if (bus->slave) {
+ bus->master_or_slave = I2C_SLAVE;
+ return npcm_i2c_int_slave_handler(bus);
+ }
+#endif
+ return IRQ_NONE;
+}
+
+static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
+ u8 slave_addr, u16 nwrite, u16 nread,
+ u8 *write_data, u8 *read_data,
+ bool use_PEC, bool use_read_block)
+{
+ if (bus->state != I2C_IDLE) {
+ bus->cmd_err = -EBUSY;
+ return false;
+ }
+ bus->dest_addr = slave_addr << 1;
+ bus->wr_buf = write_data;
+ bus->wr_size = nwrite;
+ bus->wr_ind = 0;
+ bus->rd_buf = read_data;
+ bus->rd_size = nread;
+ bus->rd_ind = 0;
+ bus->PEC_use = 0;
+
+	/*
+	 * For tx, PEC is appended to the buffer by the i2c IF; the PEC flag
+	 * is ignored.
+	 */
+ if (nread)
+ bus->PEC_use = use_PEC;
+
+ bus->read_block_use = use_read_block;
+ if (nread && !nwrite)
+ bus->operation = I2C_READ_OPER;
+ else
+ bus->operation = I2C_WRITE_OPER;
+ if (bus->fifo_use) {
+ u8 i2cfif_cts;
+
+ npcm_i2c_select_bank(bus, I2C_BANK_1);
+ /* clear FIFO and relevant status bits. */
+ i2cfif_cts = ioread8(bus->reg + NPCM_I2CFIF_CTS);
+ i2cfif_cts &= ~NPCM_I2CFIF_CTS_SLVRSTR;
+ i2cfif_cts |= NPCM_I2CFIF_CTS_CLR_FIFO;
+ iowrite8(i2cfif_cts, bus->reg + NPCM_I2CFIF_CTS);
+ }
+
+ bus->state = I2C_IDLE;
+ npcm_i2c_stall_after_start(bus, true);
+ npcm_i2c_master_start(bus);
+ return true;
+}
+
+static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct npcm_i2c *bus = container_of(adap, struct npcm_i2c, adap);
+ struct i2c_msg *msg0, *msg1;
+ unsigned long time_left, flags;
+ u16 nwrite, nread;
+ u8 *write_data, *read_data;
+ u8 slave_addr;
+ int timeout;
+ int ret = 0;
+ bool read_block = false;
+ bool read_PEC = false;
+ u8 bus_busy;
+ unsigned long timeout_usec;
+
+ if (bus->state == I2C_DISABLE) {
+		dev_err(bus->dev, "I2C%d module is disabled\n", bus->num);
+ return -EINVAL;
+ }
+
+ msg0 = &msgs[0];
+ slave_addr = msg0->addr;
+ if (msg0->flags & I2C_M_RD) { /* read */
+ nwrite = 0;
+ write_data = NULL;
+ read_data = msg0->buf;
+ if (msg0->flags & I2C_M_RECV_LEN) {
+ nread = 1;
+ read_block = true;
+ if (msg0->flags & I2C_CLIENT_PEC)
+ read_PEC = true;
+ } else {
+ nread = msg0->len;
+ }
+ } else { /* write */
+ nwrite = msg0->len;
+ write_data = msg0->buf;
+ nread = 0;
+ read_data = NULL;
+ if (num == 2) {
+ msg1 = &msgs[1];
+ read_data = msg1->buf;
+ if (msg1->flags & I2C_M_RECV_LEN) {
+ nread = 1;
+ read_block = true;
+ if (msg1->flags & I2C_CLIENT_PEC)
+ read_PEC = true;
+ } else {
+ nread = msg1->len;
+ read_block = false;
+ }
+ }
+ }
+
+	/* Adaptive timeout: estimated time in usec + 100% margin */
+	timeout_usec = (2 * 10000 / (bus->bus_freq / 1000)) *
+		       (2 + nread + nwrite);
+ timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
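+
+	/*
+	 * Illustrative note (not part of the driver logic): at 100 kHz the
+	 * estimate above budgets 2 * 10000 / 100 = 200 us per byte (about 9
+	 * bit-times plus a 100% margin), so a 32-byte write gets
+	 * (2 + 0 + 32) * 200 us = 6.8 ms and is then raised to the 35 ms
+	 * floor by the max() above.
+	 */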
+ if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
+ dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
+ return -EINVAL;
+ }
+
+ time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1;
+ do {
+		/*
+		 * We must clear the slave address immediately when the bus is
+		 * not busy, so we take the spinlock for it, but we don't hold
+		 * the lock for the entire loop since that would take too
+		 * long.
+		 */
+ spin_lock_irqsave(&bus->lock, flags);
+ bus_busy = ioread8(bus->reg + NPCM_I2CCST) & NPCM_I2CCST_BB;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ if (!bus_busy && bus->slave)
+ iowrite8((bus->slave->addr & 0x7F),
+ bus->reg + NPCM_I2CADDR1);
+#endif
+ spin_unlock_irqrestore(&bus->lock, flags);
+
+ } while (time_is_after_jiffies(time_left) && bus_busy);
+
+ if (bus_busy) {
+ iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
+ npcm_i2c_reset(bus);
+ i2c_recover_bus(adap);
+ return -EAGAIN;
+ }
+
+ npcm_i2c_init_params(bus);
+ bus->dest_addr = slave_addr;
+ bus->msgs = msgs;
+ bus->msgs_num = num;
+ bus->cmd_err = 0;
+ bus->read_block_use = read_block;
+
+ reinit_completion(&bus->cmd_complete);
+ if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+ write_data, read_data, read_PEC,
+ read_block))
+ ret = -EBUSY;
+
+ if (ret != -EBUSY) {
+ time_left = wait_for_completion_timeout(&bus->cmd_complete,
+ timeout);
+
+ if (time_left == 0) {
+ if (bus->timeout_cnt < ULLONG_MAX)
+ bus->timeout_cnt++;
+ if (bus->master_or_slave == I2C_MASTER) {
+ i2c_recover_bus(adap);
+ bus->cmd_err = -EIO;
+ bus->state = I2C_IDLE;
+ }
+ }
+ }
+ ret = bus->cmd_err;
+
+	/* if there was a BER, check whether the bus needs to be recovered: */
+ if (bus->cmd_err == -EAGAIN)
+ ret = i2c_recover_bus(adap);
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* reenable slave if it was enabled */
+ if (bus->slave)
+ iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN,
+ bus->reg + NPCM_I2CADDR1);
+#endif
+	return ret;
+}
+
+static u32 npcm_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_PEC |
+ I2C_FUNC_SLAVE;
+}
+
+static const struct i2c_adapter_quirks npcm_i2c_quirks = {
+ .max_read_len = 32768,
+ .max_write_len = 32768,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static const struct i2c_algorithm npcm_i2c_algo = {
+ .master_xfer = npcm_i2c_master_xfer,
+ .functionality = npcm_i2c_functionality,
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ .reg_slave = npcm_i2c_reg_slave,
+ .unreg_slave = npcm_i2c_unreg_slave,
+#endif
+};
+
+/* i2c debugfs directory: used to keep health monitor of i2c devices */
+static struct dentry *npcm_i2c_debugfs_dir;
+
+static void npcm_i2c_init_debugfs(struct platform_device *pdev,
+ struct npcm_i2c *bus)
+{
+ struct dentry *d;
+
+ if (!npcm_i2c_debugfs_dir)
+ return;
+ d = debugfs_create_dir(dev_name(&pdev->dev), npcm_i2c_debugfs_dir);
+ if (IS_ERR_OR_NULL(d))
+ return;
+ debugfs_create_u64("ber_cnt", 0444, d, &bus->ber_cnt);
+ debugfs_create_u64("nack_cnt", 0444, d, &bus->nack_cnt);
+ debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt);
+ debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt);
+ debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt);
+
+ bus->debugfs = d;
+}
+
+static int npcm_i2c_probe_bus(struct platform_device *pdev)
+{
+ struct npcm_i2c *bus;
+ struct i2c_adapter *adap;
+ struct clk *i2c_clk;
+ static struct regmap *gcr_regmap;
+ static struct regmap *clk_regmap;
+ int irq;
+ int ret;
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->dev = &pdev->dev;
+
+ bus->num = of_alias_get_id(pdev->dev.of_node, "i2c");
+ /* core clk must be acquired to calculate module timing settings */
+ i2c_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c_clk))
+ return PTR_ERR(i2c_clk);
+ bus->apb_clk = clk_get_rate(i2c_clk);
+
+ gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+ if (IS_ERR(gcr_regmap))
+		return PTR_ERR(gcr_regmap);
+ regmap_write(gcr_regmap, NPCM_I2CSEGCTL, NPCM_I2CSEGCTL_INIT_VAL);
+
+ clk_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-clk");
+ if (IS_ERR(clk_regmap))
+		return PTR_ERR(clk_regmap);
+
+ bus->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bus->reg))
+		return PTR_ERR(bus->reg);
+
+ spin_lock_init(&bus->lock);
+ init_completion(&bus->cmd_complete);
+
+ adap = &bus->adap;
+ adap->owner = THIS_MODULE;
+ adap->retries = 3;
+ adap->timeout = HZ;
+ adap->algo = &npcm_i2c_algo;
+ adap->quirks = &npcm_i2c_quirks;
+ adap->algo_data = bus;
+ adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
+ adap->nr = pdev->id;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
+ dev_name(bus->dev), bus);
+ if (ret)
+ return ret;
+
+ ret = __npcm_i2c_init(bus, pdev);
+ if (ret)
+ return ret;
+
+ npcm_i2c_recovery_init(adap);
+
+ i2c_set_adapdata(adap, bus);
+
+ snprintf(bus->adap.name, sizeof(bus->adap.name), "npcm_i2c_%d",
+ bus->num);
+ ret = i2c_add_numbered_adapter(&bus->adap);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, bus);
+ npcm_i2c_init_debugfs(pdev, bus);
+ return 0;
+}
+
+static int npcm_i2c_remove_bus(struct platform_device *pdev)
+{
+ unsigned long lock_flags;
+ struct npcm_i2c *bus = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(bus->debugfs);
+ spin_lock_irqsave(&bus->lock, lock_flags);
+ npcm_i2c_disable(bus);
+ spin_unlock_irqrestore(&bus->lock, lock_flags);
+ i2c_del_adapter(&bus->adap);
+ return 0;
+}
+
+static const struct of_device_id npcm_i2c_bus_of_table[] = {
+ { .compatible = "nuvoton,npcm750-i2c", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table);
+
+static struct platform_driver npcm_i2c_bus_driver = {
+ .probe = npcm_i2c_probe_bus,
+ .remove = npcm_i2c_remove_bus,
+ .driver = {
+ .name = "nuvoton-i2c",
+ .of_match_table = npcm_i2c_bus_of_table,
+ }
+};
+
+static int __init npcm_i2c_init(void)
+{
+ npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
+ platform_driver_register(&npcm_i2c_bus_driver);
+ return 0;
+}
+module_init(npcm_i2c_init);
+
+static void __exit npcm_i2c_exit(void)
+{
+ platform_driver_unregister(&npcm_i2c_bus_driver);
+ debugfs_remove_recursive(npcm_i2c_debugfs_dir);
+}
+module_exit(npcm_i2c_exit);
+
+MODULE_AUTHOR("Avi Fishman <avi.fishman@gmail.com>");
+MODULE_AUTHOR("Tali Perry <tali.perry@nuvoton.com>");
+MODULE_AUTHOR("Tyrone Ting <kfting@nuvoton.com>");
+MODULE_DESCRIPTION("Nuvoton I2C Bus Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 4037c504589c..bf7ead45f66b 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -65,6 +65,7 @@ enum pca955x_type {
pca9550,
pca9551,
pca9552,
+ pca9552_ibm,
pca9553,
};
@@ -90,6 +91,11 @@ static struct pca955x_chipdef pca955x_chipdefs[] = {
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
},
+ [pca9552_ibm] = {
+ .bits = 16,
+ .slv_addr = /* 0110xxx */ 0x30,
+ .slv_addr_shift = 3,
+ },
[pca9553] = {
.bits = 4,
.slv_addr = /* 110001x */ 0x62,
@@ -101,6 +107,7 @@ static const struct i2c_device_id pca955x_id[] = {
{ "pca9550", pca9550 },
{ "pca9551", pca9551 },
{ "pca9552", pca9552 },
+ { "pca9552-ibm", pca9552_ibm },
{ "pca9553", pca9553 },
{ }
};
@@ -412,6 +419,7 @@ static const struct of_device_id of_pca955x_match[] = {
{ .compatible = "nxp,pca9550", .data = (void *)pca9550 },
{ .compatible = "nxp,pca9551", .data = (void *)pca9551 },
{ .compatible = "nxp,pca9552", .data = (void *)pca9552 },
+ { .compatible = "nxp,pca9552-ibm", .data = (void *)pca9552_ibm },
{ .compatible = "nxp,pca9553", .data = (void *)pca9553 },
{},
};
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0a59249198d3..ddf5bcdf74eb 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -632,6 +632,23 @@ config MFD_INTEL_MSIC
Passage) chip. This chip embeds audio, battery, GPIO, etc.
devices used in Intel Medfield platforms.
+config MFD_INTEL_PECI_CLIENT
+ tristate "Intel PECI client"
+ depends on (PECI || COMPILE_TEST)
+ select MFD_CORE
+ help
+ If you say yes to this option, support will be included for the
+ Intel PECI (Platform Environment Control Interface) client. PECI is a
+ one-wire bus interface that provides a communication channel from PECI
+ clients in Intel processors and chipset components to external
+ monitoring or control devices.
+
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ This driver can also be built as a module. If so, the module
+ will be called intel-peci-client.
+
config MFD_IPAQ_MICRO
bool "Atmel Micro ASIC (iPAQ h3100/h3600/h3700) Support"
depends on SA1100_H3100 || SA1100_H3600
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index f935d10cbf0f..ca8683961847 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -212,6 +212,7 @@ obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
+obj-$(CONFIG_MFD_INTEL_PECI_CLIENT) += intel-peci-client.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o
diff --git a/drivers/mfd/intel-peci-client.c b/drivers/mfd/intel-peci-client.c
new file mode 100644
index 000000000000..24f15438634c
--- /dev/null
+++ b/drivers/mfd/intel-peci-client.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/peci.h>
+
+#define CPU_ID_MODEL_MASK GENMASK(7, 4)
+#define CPU_ID_FAMILY_MASK GENMASK(11, 8)
+#define CPU_ID_EXT_MODEL_MASK GENMASK(19, 16)
+#define CPU_ID_EXT_FAMILY_MASK GENMASK(27, 20)
+
+#define LOWER_NIBBLE_MASK GENMASK(3, 0)
+#define UPPER_NIBBLE_MASK GENMASK(7, 4)
+#define LOWER_BYTE_MASK GENMASK(7, 0)
+#define UPPER_BYTE_MASK GENMASK(16, 8)
+
+static struct mfd_cell peci_functions[] = {
+ { .name = "peci-cputemp", },
+ { .name = "peci-dimmtemp", },
+};
+
+static const struct cpu_gen_info cpu_gen_info_table[] = {
+ { /* Haswell Xeon */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_HASWELL_X,
+ .core_max = CORE_MAX_ON_HSX,
+ .chan_rank_max = CHAN_RANK_MAX_ON_HSX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_HSX },
+ { /* Broadwell Xeon */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_BROADWELL_X,
+ .core_max = CORE_MAX_ON_BDX,
+ .chan_rank_max = CHAN_RANK_MAX_ON_BDX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_BDX },
+ { /* Skylake Xeon */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_SKYLAKE_X,
+ .core_max = CORE_MAX_ON_SKX,
+ .chan_rank_max = CHAN_RANK_MAX_ON_SKX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_SKX },
+ { /* Skylake Xeon D */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_SKYLAKE_XD,
+ .core_max = CORE_MAX_ON_SKXD,
+ .chan_rank_max = CHAN_RANK_MAX_ON_SKXD,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_SKXD },
+};
+
+static int peci_client_get_cpu_gen_info(struct peci_client_manager *priv)
+{
+ struct device *dev = &priv->client->dev;
+ u32 cpu_id;
+ u16 family;
+ u8 model;
+ int ret;
+ int i;
+
+ ret = peci_get_cpu_id(priv->client->adapter, priv->client->addr,
+ &cpu_id);
+ if (ret)
+ return ret;
+
+ family = FIELD_PREP(LOWER_BYTE_MASK,
+ FIELD_GET(CPU_ID_FAMILY_MASK, cpu_id)) |
+ FIELD_PREP(UPPER_BYTE_MASK,
+ FIELD_GET(CPU_ID_EXT_FAMILY_MASK, cpu_id));
+ model = FIELD_PREP(LOWER_NIBBLE_MASK,
+ FIELD_GET(CPU_ID_MODEL_MASK, cpu_id)) |
+ FIELD_PREP(UPPER_NIBBLE_MASK,
+ FIELD_GET(CPU_ID_EXT_MODEL_MASK, cpu_id));
+
+ for (i = 0; i < ARRAY_SIZE(cpu_gen_info_table); i++) {
+ const struct cpu_gen_info *cpu_info = &cpu_gen_info_table[i];
+
+ if (family == cpu_info->family && model == cpu_info->model) {
+ priv->gen_info = cpu_info;
+ break;
+ }
+ }
+
+ if (!priv->gen_info) {
+ dev_err(dev, "Can't support this CPU: 0x%x\n", cpu_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
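+
+/*
+ * Illustrative decode (assuming a representative Skylake-X CPUID of
+ * 0x00050654): FIELD_GET() yields model = 0x5, family = 0x6,
+ * ext_model = 0x5 and ext_family = 0x0, so family = 0x06 (INTEL_FAM6)
+ * and model = (0x5 << 4) | 0x5 = 0x55, matching the INTEL_FAM6_SKYLAKE_X
+ * entry in cpu_gen_info_table above.
+ */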
+
+static int peci_client_probe(struct peci_client *client)
+{
+ struct device *dev = &client->dev;
+ struct peci_client_manager *priv;
+ uint cpu_no;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->client = client;
+ cpu_no = client->addr - PECI_BASE_ADDR;
+
+ ret = peci_client_get_cpu_gen_info(priv);
+ if (ret)
+ return ret;
+
+ ret = devm_mfd_add_devices(dev, cpu_no, peci_functions,
+ ARRAY_SIZE(peci_functions), NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register child devices: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id peci_client_of_table[] = {
+ { .compatible = "intel,peci-client" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, peci_client_of_table);
+#endif
+
+static const struct peci_device_id peci_client_ids[] = {
+ { .name = "peci-client" },
+ { }
+};
+MODULE_DEVICE_TABLE(peci, peci_client_ids);
+
+static struct peci_driver peci_client_driver = {
+ .probe = peci_client_probe,
+ .id_table = peci_client_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(peci_client_of_table),
+ },
+};
+module_peci_driver(peci_client_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI client driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99e151475d8f..90657a05c4c3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -453,6 +453,13 @@ config XILINX_SDFEC
If unsure, say N.
+config MCTP_LPC
+ tristate "MCTP LPC binding implementation for ASPEED BMCs"
+ depends on REGMAP
+ help
+	  Implements the MCTP LPC binding via KCS LPC IO cycles for control
+	  and LPC FWH cycles for data.
+
config MISC_RTSX
tristate
default MISC_RTSX_PCI || MISC_RTSX_USB
@@ -465,6 +472,21 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+config NPCM7XX_LPC_BPC
+ tristate "NPCM7xx LPC BIOS Post Code support"
+ depends on (ARCH_NPCM7XX || COMPILE_TEST)
+ help
+	  Provides an NPCM7xx driver to control the LPC BIOS Post Code
+	  interface, which allows the BMC to monitor and save the data
+	  written by the host to an arbitrary LPC I/O port.
+
+config NPCM7XX_PCI_MBOX
+ tristate "NPCM7xx PCI Mailbox Controller"
+ depends on (ARCH_NPCM7XX || COMPILE_TEST) && REGMAP && MFD_SYSCON
+ help
+ Expose the NPCM750/730/715/705 PCI MBOX registers found on
+ Nuvoton SOCs to userspace.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9abf2923d831..7720cd329021 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -58,3 +58,6 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
+obj-$(CONFIG_NPCM7XX_LPC_BPC) += npcm7xx-lpc-bpc.o
+obj-$(CONFIG_NPCM7XX_PCI_MBOX) += npcm7xx-pci-mbox.o
+obj-$(CONFIG_MCTP_LPC) += mctp-lpc.o
diff --git a/drivers/misc/mctp-lpc.c b/drivers/misc/mctp-lpc.c
new file mode 100644
index 000000000000..71fc4ae69de7
--- /dev/null
+++ b/drivers/misc/mctp-lpc.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019, IBM Corp.
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#define LPC_HICRB 0x080
+#define LPC_HICRB_IBFIF4 BIT(1)
+#define LPC_HICRB_LPC4E BIT(0)
+#define LPC_HICRC 0x084
+#define LPC_KCS4_IRQSEL_MASK GENMASK(7, 4)
+#define LPC_KCS4_IRQSEL_SHIFT 4
+#define LPC_KCS4_IRQTYPE_MASK GENMASK(3, 2)
+#define LPC_KCS4_IRQTYPE_SHIFT 2
+#define LPC_KCS4_IRQTYPE_LOW 0b00
+#define LPC_KCS4_IRQTYPE_HIGH 0b01
+#define LPC_KCS4_IRQTYPE_RSVD 0b10
+#define LPC_KCS4_IRQTYPE_RISING 0b11
+#define LPC_KCS4_OBF4_AUTO_CLR BIT(1)
+#define LPC_KCS4_IRQ_HOST BIT(0)
+#define LPC_LADR4 0x090
+#define LPC_IDR4 0x094
+#define LPC_ODR4 0x098
+#define LPC_STR4 0x09C
+#define STR4_IBF (1 << 1)
+#define STR4_OBF (1 << 0)
+
+#define HOST_ODR 0xca2
+#define HOST_STR 0xca3
+#define HOST_SERIRQ_ID 11
+#define HOST_SERIRQ_TYPE LPC_KCS4_IRQTYPE_LOW
+
+#define RX_BUF_SIZE 1024
+
+struct mctp_lpc {
+ struct miscdevice miscdev;
+ struct regmap *map;
+
+ wait_queue_head_t rx;
+ bool pending;
+ u8 idr;
+};
+
+static irqreturn_t mctp_lpc_irq(int irq, void *data)
+{
+ struct mctp_lpc *priv = data;
+ unsigned long flags;
+ unsigned int hicrb;
+ struct device *dev;
+ unsigned int str;
+ irqreturn_t ret;
+
+ dev = priv->miscdev.this_device;
+
+ spin_lock_irqsave(&priv->rx.lock, flags);
+
+ regmap_read(priv->map, LPC_STR4, &str);
+ regmap_read(priv->map, LPC_HICRB, &hicrb);
+
+ if ((str & STR4_IBF) && (hicrb & LPC_HICRB_IBFIF4)) {
+ unsigned int val;
+
+ if (priv->pending)
+ dev_err(dev, "Storm brewing!");
+
+ /* Mask the IRQ / Enter polling mode */
+ dev_dbg(dev, "Received IRQ %d, disabling to provide back-pressure\n",
+ irq);
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIF4, 0);
+
+ /*
+ * Extract the IDR4 value to ack the IRQ. Reading IDR clears
+ * IBF and allows the host to write another value, however as
+ * we have disabled IRQs the back-pressure is still applied
+ * until userspace starts servicing the interface.
+ */
+ regmap_read(priv->map, LPC_IDR4, &val);
+ priv->idr = val & 0xff;
+ priv->pending = true;
+
+ dev_dbg(dev, "Set pending, waking waiters\n");
+ wake_up_locked(&priv->rx);
+ ret = IRQ_HANDLED;
+ } else {
+ dev_dbg(dev, "LPC IRQ triggered, but not for us (str=0x%x, hicrb=0x%x)\n",
+ str, hicrb);
+ ret = IRQ_NONE;
+ }
+
+ spin_unlock_irqrestore(&priv->rx.lock, flags);
+
+ return ret;
+}
+
+static inline struct mctp_lpc *to_mctp_lpc(struct file *filp)
+{
+ return container_of(filp->private_data, struct mctp_lpc, miscdev);
+}
+
+static ssize_t mctp_lpc_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mctp_lpc *priv;
+ struct device *dev;
+ size_t remaining;
+ ssize_t rc;
+
+ priv = to_mctp_lpc(filp);
+ dev = priv->miscdev.this_device;
+
+ if (!count)
+ return 0;
+
+ if (count > 2 || *ppos > 1)
+ return -EINVAL;
+
+ remaining = count;
+
+ spin_lock_irq(&priv->rx.lock);
+ if (*ppos == 0) {
+ unsigned int val;
+ u8 str;
+
+		/* Blocking read only; O_NONBLOCK is not supported */
+ dev_dbg(dev, "Waiting for IBF\n");
+ regmap_read(priv->map, LPC_STR4, &val);
+ str = val & 0xff;
+ rc = wait_event_interruptible_locked(priv->rx, (priv->pending || str & STR4_IBF));
+ if (rc < 0)
+ goto out;
+
+ if (signal_pending(current)) {
+ dev_dbg(dev, "Interrupted waiting for IBF\n");
+ rc = -EINTR;
+ goto out;
+ }
+
+ /*
+ * Re-enable IRQs prior to possible read of IDR (which clears
+ * IBF) to ensure we receive interrupts for subsequent writes
+ * to IDR. Writes to IDR by the host should not occur while IBF
+ * is set.
+ */
+ dev_dbg(dev, "Woken by IBF, enabling IRQ\n");
+ regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIF4,
+ LPC_HICRB_IBFIF4);
+
+ /* Read data out of IDR into internal storage if necessary */
+ if (!priv->pending) {
+ WARN(!(str & STR4_IBF), "Unknown reason for wakeup!");
+
+ /* Extract the IDR4 value to ack the IRQ */
+ regmap_read(priv->map, LPC_IDR4, &val);
+ priv->idr = val & 0xff;
+ }
+
+ /* Copy data from internal storage to userspace */
+ if (copy_to_user(buf, &priv->idr, sizeof(priv->idr))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* We're done consuming the internally stored value */
+ priv->pending = false;
+
+ remaining--;
+ buf++;
+ }
+
+ if (remaining) {
+ /* Either:
+ *
+ * 1. (count == 1 && *ppos == 1)
+ * 2. (count == 2 && *ppos == 0)
+ */
+ unsigned int val;
+ u8 str;
+
+ regmap_read(priv->map, LPC_STR4, &val);
+ str = val & 0xff;
+ if (*ppos == 0 || priv->pending)
+ /*
+ * If we got this far with `*ppos == 0` then we've read
+ * data out of IDR, so set IBF when reporting back to
+ * userspace so userspace knows the IDR value is valid.
+ */
+ str |= STR4_IBF;
+
+ dev_dbg(dev, "Read status 0x%x\n", str);
+ if (copy_to_user(buf, &str, sizeof(str))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ remaining--;
+ }
+
+ WARN_ON(remaining);
+
+ rc = count;
+
+out:
+ spin_unlock_irq(&priv->rx.lock);
+
+ return rc;
+}
+
+static ssize_t mctp_lpc_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ uint8_t _data[2], *data = &_data[0];
+ struct mctp_lpc *priv;
+ struct device *dev;
+ size_t remaining;
+ unsigned int str;
+
+ priv = to_mctp_lpc(filp);
+ dev = priv->miscdev.this_device;
+
+ if (!count)
+ return count;
+
+ if (count > 2)
+ return -EINVAL;
+
+ if (*ppos >= 2)
+ return -EINVAL;
+
+ if (*ppos + count > 2)
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, count))
+ return -EFAULT;
+
+ remaining = count;
+
+ if (*ppos == 0) {
+ /* Wait until OBF is clear - we don't get an IRQ */
+ dev_dbg(dev, "Waiting for OBF to clear\n");
+ for (;;) {
+ if (signal_pending(current))
+ return -EINTR;
+
+ regmap_read(priv->map, LPC_STR4, &str);
+ if (!(str & STR4_OBF))
+ break;
+
+			usleep_range(1000, 2000);
+ }
+
+ dev_dbg(dev, "Writing 0x%x to ODR\n", *data);
+ regmap_write(priv->map, LPC_ODR4, *data);
+ remaining--;
+ data++;
+ }
+
+ if (remaining) {
+ if (!(*data & STR4_OBF))
+ dev_err(dev, "Clearing OBF with status write: 0x%x\n",
+ *data);
+ dev_dbg(dev, "Writing status 0x%x\n", *data);
+ regmap_write(priv->map, LPC_STR4, *data);
+ remaining--;
+ }
+
+ WARN_ON(remaining);
+
+ regmap_read(priv->map, LPC_STR4, &str);
+ dev_dbg(dev, "Triggering SerIRQ (current str=0x%x)\n", str);
+
+ /*
+ * Trigger Host IRQ on ODR write. Do this after any STR write in case
+ * we need to write ODR to indicate an STR update (which we do).
+ */
+ if (*ppos == 0)
+ regmap_update_bits(priv->map, LPC_HICRC, LPC_KCS4_IRQ_HOST,
+ LPC_KCS4_IRQ_HOST);
+
+ return count;
+}
+
+static __poll_t mctp_lpc_poll(struct file *filp, poll_table *wait)
+{
+ struct mctp_lpc *priv;
+ struct device *dev;
+ unsigned int val;
+ bool ibf;
+
+ priv = to_mctp_lpc(filp);
+ dev = priv->miscdev.this_device;
+
+ regmap_read(priv->map, LPC_STR4, &val);
+
+ spin_lock_irq(&priv->rx.lock);
+
+ ibf = priv->pending || val & STR4_IBF;
+
+ if (!ibf) {
+ dev_dbg(dev, "Polling on IBF\n");
+
+ spin_unlock_irq(&priv->rx.lock);
+
+ poll_wait(filp, &priv->rx, wait);
+ if (signal_pending(current)) {
+ dev_dbg(dev, "Polling IBF was interrupted\n");
+ goto out;
+ }
+
+ spin_lock_irq(&priv->rx.lock);
+
+ regmap_read(priv->map, LPC_STR4, &val);
+
+ ibf = priv->pending || val & STR4_IBF;
+ }
+
+ spin_unlock_irq(&priv->rx.lock);
+
+out:
+ dev_dbg(dev, "Polled IBF state: %s\n", ibf ? "set" : "clear");
+
+ return ibf ? EPOLLIN : 0;
+}
+
+static const struct file_operations mctp_lpc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_seek_end_llseek,
+ .read = mctp_lpc_read,
+ .write = mctp_lpc_write,
+ .poll = mctp_lpc_poll,
+};
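+
+/*
+ * Minimal userspace sketch (illustrative only, not part of the driver):
+ * the device exposes a two-byte window, offset 0 = data (IDR on read,
+ * ODR on write) and offset 1 = status (STR4).
+ *
+ *	int fd = open("/dev/mctp0", O_RDWR);
+ *	uint8_t rx[2], tx[2] = { 0xaa, 0x00 };
+ *
+ *	pread(fd, rx, 2, 0);	// rx[0] = byte from host, rx[1] = STR4
+ *	pwrite(fd, tx, 2, 0);	// write ODR then STR4, then raise SerIRQ
+ */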
+
+static int mctp_lpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int mask, val;
+ struct mctp_lpc *priv;
+ int irq;
+ int rc;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Set the LPC address. Simultaneously, test our MMIO regmap works. All
+ * subsequent accesses are assumed to work
+	 * subsequent accesses are assumed to work.
+ rc = regmap_write(priv->map, LPC_LADR4, ((HOST_STR) << 16) | HOST_ODR);
+ if (rc < 0)
+ return rc;
+
+ /* Set up the SerIRQ */
+ mask = LPC_KCS4_IRQSEL_MASK
+ | LPC_KCS4_IRQTYPE_MASK
+ | LPC_KCS4_OBF4_AUTO_CLR;
+ val = (HOST_SERIRQ_ID << LPC_KCS4_IRQSEL_SHIFT)
+ | (HOST_SERIRQ_TYPE << LPC_KCS4_IRQTYPE_SHIFT);
+ val &= ~LPC_KCS4_OBF4_AUTO_CLR; /* Unnecessary, just documentation */
+ regmap_update_bits(priv->map, LPC_HICRC, mask, val);
+
+ /* Trigger waiters from IRQ */
+ init_waitqueue_head(&priv->rx);
+
+ dev_set_drvdata(dev, priv);
+
+ /* Set up the miscdevice */
+ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->miscdev.name = "mctp0";
+ priv->miscdev.fops = &mctp_lpc_fops;
+
+ /* Configure the IRQ handler */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ rc = devm_request_irq(dev, irq, mctp_lpc_irq, IRQF_SHARED,
+ dev_name(dev), priv);
+ if (rc < 0)
+ return rc;
+
+ /* Register the device */
+ rc = misc_register(&priv->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register device\n");
+ return rc;
+ }
+
+ /* Enable the channel */
+ regmap_update_bits(priv->map, LPC_HICRB,
+ LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E,
+ LPC_HICRB_IBFIF4 | LPC_HICRB_LPC4E);
+
+ return 0;
+}
+
+static int mctp_lpc_remove(struct platform_device *pdev)
+{
+ struct mctp_lpc *ctx = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&ctx->miscdev);
+
+ return 0;
+}
+
+static const struct of_device_id mctp_lpc_match[] = {
+ { .compatible = "openbmc,mctp-lpc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mctp_lpc_match);
+
+static struct platform_driver mctp_lpc = {
+ .driver = {
+ .name = "mctp-lpc",
+ .of_match_table = mctp_lpc_match,
+ },
+ .probe = mctp_lpc_probe,
+ .remove = mctp_lpc_remove,
+};
+module_platform_driver(mctp_lpc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("OpenBMC MCTP LPC binding on ASPEED KCS");
diff --git a/drivers/misc/npcm7xx-lpc-bpc.c b/drivers/misc/npcm7xx-lpc-bpc.c
new file mode 100644
index 000000000000..e014e07cd4a4
--- /dev/null
+++ b/drivers/misc/npcm7xx-lpc-bpc.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 Nuvoton Technology corporation.
+
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+
+#define DEVICE_NAME "npcm7xx-lpc-bpc"
+
+#define NUM_BPC_CHANNELS 2
+#define DW_PAD_SIZE 3
+
+/* BIOS POST Code FIFO Registers */
+#define NPCM7XX_BPCFA2L_REG 0x2 //BIOS POST Code FIFO Address 2 LSB
+#define NPCM7XX_BPCFA2M_REG 0x4 //BIOS POST Code FIFO Address 2 MSB
+#define NPCM7XX_BPCFEN_REG 0x6 //BIOS POST Code FIFO Enable
+#define NPCM7XX_BPCFSTAT_REG 0x8 //BIOS POST Code FIFO Status
+#define NPCM7XX_BPCFDATA_REG 0xA //BIOS POST Code FIFO Data
+#define NPCM7XX_BPCFMSTAT_REG 0xC //BIOS POST Code FIFO Miscellaneous Status
+#define NPCM7XX_BPCFA1L_REG 0x10 //BIOS POST Code FIFO Address 1 LSB
+#define NPCM7XX_BPCFA1M_REG 0x12 //BIOS POST Code FIFO Address 1 MSB
+
+/* BIOS register data */
+#define FIFO_IOADDR1_ENABLE 0x80
+#define FIFO_IOADDR2_ENABLE 0x40
+
+/* BPC interface package and structure definition */
+#define BPC_KFIFO_SIZE 0x400
+
+/* BPC register data */
+#define FIFO_DATA_VALID 0x80
+#define FIFO_OVERFLOW 0x20
+#define FIFO_READY_INT_ENABLE 0x8
+#define FIFO_DWCAPTURE 0x4
+#define FIFO_ADDR_DECODE 0x1
+
+/* Host reset */
+#define HOST_RESET_INT_ENABLE 0x10
+#define HOST_RESET_CHANGED 0x40
+
+struct npcm7xx_bpc_channel {
+ struct npcm7xx_bpc *data;
+ struct kfifo fifo;
+ wait_queue_head_t wq;
+ bool host_reset;
+ struct miscdevice miscdev;
+};
+
+struct npcm7xx_bpc {
+ void __iomem *base;
+ int irq;
+ bool en_dwcap;
+ struct npcm7xx_bpc_channel ch[NUM_BPC_CHANNELS];
+};
+
+static struct npcm7xx_bpc_channel *npcm7xx_file_to_ch(struct file *file)
+{
+ return container_of(file->private_data, struct npcm7xx_bpc_channel,
+ miscdev);
+}
+
+static ssize_t npcm7xx_bpc_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct npcm7xx_bpc_channel *chan = npcm7xx_file_to_ch(file);
+ struct npcm7xx_bpc *lpc_bpc = chan->data;
+ unsigned int copied;
+ int ret = 0;
+ int cond_size = 1;
+
+ if (lpc_bpc->en_dwcap)
+ cond_size = 3;
+
+ if (kfifo_len(&chan->fifo) < cond_size) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+		ret = wait_event_interruptible(chan->wq,
+				kfifo_len(&chan->fifo) >= cond_size);
+ if (ret == -ERESTARTSYS)
+ return -EINTR;
+ }
+
+ ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
+
+ return ret ? ret : copied;
+}
+
+static __poll_t npcm7xx_bpc_poll(struct file *file,
+ struct poll_table_struct *pt)
+{
+ struct npcm7xx_bpc_channel *chan = npcm7xx_file_to_ch(file);
+ __poll_t mask = 0;
+
+ poll_wait(file, &chan->wq, pt);
+ if (!kfifo_is_empty(&chan->fifo))
+		mask |= EPOLLIN;
+
+ if (chan->host_reset) {
+		mask |= EPOLLHUP;
+ chan->host_reset = false;
+ }
+
+ return mask;
+}
+
+static const struct file_operations npcm7xx_bpc_fops = {
+ .owner = THIS_MODULE,
+ .read = npcm7xx_bpc_read,
+ .poll = npcm7xx_bpc_poll,
+ .llseek = noop_llseek,
+};
+
+static irqreturn_t npcm7xx_bpc_irq(int irq, void *arg)
+{
+ struct npcm7xx_bpc *lpc_bpc = arg;
+ u8 fifo_st;
+ u8 host_st;
+ u8 addr_index = 0;
+	u8 data;
+ u8 padzero[3] = {0};
+ u8 last_addr_bit = 0;
+ bool isr_flag = false;
+
+ fifo_st = ioread8(lpc_bpc->base + NPCM7XX_BPCFSTAT_REG);
+ while (FIFO_DATA_VALID & fifo_st) {
+ /* If dwcapture enabled only channel 0 (FIFO 0) used */
+ if (!lpc_bpc->en_dwcap)
+ addr_index = fifo_st & FIFO_ADDR_DECODE;
+ else
+ last_addr_bit = fifo_st & FIFO_ADDR_DECODE;
+
+		/* Read data from FIFO to clear interrupt */
+		data = ioread8(lpc_bpc->base + NPCM7XX_BPCFDATA_REG);
+		if (kfifo_is_full(&lpc_bpc->ch[addr_index].fifo))
+			kfifo_skip(&lpc_bpc->ch[addr_index].fifo);
+		kfifo_put(&lpc_bpc->ch[addr_index].fifo, data);
+		if (fifo_st & FIFO_OVERFLOW)
+			pr_warn("BIOS POST code FIFO overflow\n");
+
+ fifo_st = ioread8(lpc_bpc->base + NPCM7XX_BPCFSTAT_REG);
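+		/*
+		 * In double-word capture mode, pad a truncated 4-byte
+		 * POST code with zeros so the reader always consumes
+		 * aligned 4-byte records.
+		 */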
+ if (lpc_bpc->en_dwcap && last_addr_bit) {
+ if ((fifo_st & FIFO_ADDR_DECODE) ||
+ ((FIFO_DATA_VALID & fifo_st) == 0)) {
+ while (kfifo_avail(&lpc_bpc->ch[addr_index].fifo) < DW_PAD_SIZE)
+ kfifo_skip(&lpc_bpc->ch[addr_index].fifo);
+ kfifo_in(&lpc_bpc->ch[addr_index].fifo,
+ padzero, DW_PAD_SIZE);
+ }
+ }
+ isr_flag = true;
+ }
+
+ host_st = ioread8(lpc_bpc->base + NPCM7XX_BPCFMSTAT_REG);
+ if (host_st & HOST_RESET_CHANGED) {
+ iowrite8(HOST_RESET_CHANGED,
+ lpc_bpc->base + NPCM7XX_BPCFMSTAT_REG);
+ lpc_bpc->ch[addr_index].host_reset = true;
+ isr_flag = true;
+ }
+
+ if (isr_flag) {
+ wake_up_interruptible(&lpc_bpc->ch[addr_index].wq);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int npcm7xx_bpc_config_irq(struct npcm7xx_bpc *lpc_bpc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ lpc_bpc->irq = platform_get_irq(pdev, 0);
+ if (lpc_bpc->irq < 0) {
+ dev_err(dev, "get IRQ failed\n");
+ return lpc_bpc->irq;
+ }
+
+ rc = devm_request_irq(dev, lpc_bpc->irq,
+ npcm7xx_bpc_irq, IRQF_SHARED,
+ DEVICE_NAME, lpc_bpc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", lpc_bpc->irq);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int npcm7xx_enable_bpc(struct npcm7xx_bpc *lpc_bpc, struct device *dev,
+ int channel, u16 lpc_port)
+{
+ int rc;
+ u8 addr_en, reg_en;
+
+ init_waitqueue_head(&lpc_bpc->ch[channel].wq);
+
+ rc = kfifo_alloc(&lpc_bpc->ch[channel].fifo,
+ BPC_KFIFO_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ lpc_bpc->ch[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
+ lpc_bpc->ch[channel].miscdev.name =
+ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
+ lpc_bpc->ch[channel].miscdev.fops = &npcm7xx_bpc_fops;
+ lpc_bpc->ch[channel].miscdev.parent = dev;
+ rc = misc_register(&lpc_bpc->ch[channel].miscdev);
+ if (rc)
+ return rc;
+
+ lpc_bpc->ch[channel].data = lpc_bpc;
+ lpc_bpc->ch[channel].host_reset = false;
+
+ /* Enable LPC snoop channel at requested port */
+ switch (channel) {
+ case 0:
+ addr_en = FIFO_IOADDR1_ENABLE;
+ iowrite8((u8)lpc_port & 0xFF,
+ lpc_bpc->base + NPCM7XX_BPCFA1L_REG);
+ iowrite8((u8)(lpc_port >> 8),
+ lpc_bpc->base + NPCM7XX_BPCFA1M_REG);
+ break;
+ case 1:
+ addr_en = FIFO_IOADDR2_ENABLE;
+ iowrite8((u8)lpc_port & 0xFF,
+ lpc_bpc->base + NPCM7XX_BPCFA2L_REG);
+ iowrite8((u8)(lpc_port >> 8),
+ lpc_bpc->base + NPCM7XX_BPCFA2M_REG);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (lpc_bpc->en_dwcap)
+ addr_en = FIFO_DWCAPTURE;
+
+ /*
+ * Enable FIFO Ready Interrupt, FIFO Capture of I/O addr,
+ * and Host Reset
+ */
+ reg_en = ioread8(lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+ iowrite8(reg_en | addr_en | FIFO_READY_INT_ENABLE |
+ HOST_RESET_INT_ENABLE, lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+
+ return 0;
+}
+
+static void npcm7xx_disable_bpc(struct npcm7xx_bpc *lpc_bpc, int channel)
+{
+ u8 reg_en;
+
+ switch (channel) {
+ case 0:
+ reg_en = ioread8(lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+ if (lpc_bpc->en_dwcap)
+ iowrite8(reg_en & ~FIFO_DWCAPTURE,
+ lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+ else
+ iowrite8(reg_en & ~FIFO_IOADDR1_ENABLE,
+ lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+ break;
+ case 1:
+ reg_en = ioread8(lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+ iowrite8(reg_en & ~FIFO_IOADDR2_ENABLE,
+ lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+ break;
+ default:
+ return;
+ }
+
+	/* Re-read after the update above so the check below does not
+	 * see the address-enable bit that was just cleared.
+	 */
+	reg_en = ioread8(lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+	if (!(reg_en & (FIFO_IOADDR1_ENABLE | FIFO_IOADDR2_ENABLE)))
+		iowrite8(reg_en &
+			 ~(FIFO_READY_INT_ENABLE | HOST_RESET_INT_ENABLE),
+			 lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+
+ kfifo_free(&lpc_bpc->ch[channel].fifo);
+ misc_deregister(&lpc_bpc->ch[channel].miscdev);
+}
+
+static int npcm7xx_bpc_probe(struct platform_device *pdev)
+{
+ struct npcm7xx_bpc *lpc_bpc;
+ struct resource *res;
+ struct device *dev;
+ u32 port;
+ int rc;
+
+ dev = &pdev->dev;
+
+ lpc_bpc = devm_kzalloc(dev, sizeof(*lpc_bpc), GFP_KERNEL);
+ if (!lpc_bpc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "BIOS post code reg resource not found\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(dev, "BIOS post code base resource is %pR\n", res);
+ lpc_bpc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(lpc_bpc->base))
+ return PTR_ERR(lpc_bpc->base);
+
+ dev_set_drvdata(&pdev->dev, lpc_bpc);
+
+ rc = of_property_read_u32_index(dev->of_node, "monitor-ports", 0,
+ &port);
+ if (rc) {
+ dev_err(dev, "no monitor ports configured\n");
+ return -ENODEV;
+ }
+
+ lpc_bpc->en_dwcap =
+ of_property_read_bool(dev->of_node, "bpc-en-dwcapture");
+
+ rc = npcm7xx_bpc_config_irq(lpc_bpc, pdev);
+ if (rc)
+ return rc;
+
+ rc = npcm7xx_enable_bpc(lpc_bpc, dev, 0, port);
+ if (rc) {
+ dev_err(dev, "Enable BIOS post code I/O port 0 failed\n");
+ return rc;
+ }
+
+ /*
+ * Configuration of second BPC channel port is optional
+ * Double-Word Capture ignoring address 2
+ */
+ if (!lpc_bpc->en_dwcap) {
+ if (of_property_read_u32_index(dev->of_node, "monitor-ports",
+ 1, &port) == 0) {
+ rc = npcm7xx_enable_bpc(lpc_bpc, dev, 1, port);
+ if (rc) {
+ dev_err(dev, "Enable BIOS post code I/O port 1 failed, disable I/O port 0\n");
+ npcm7xx_disable_bpc(lpc_bpc, 0);
+ return rc;
+ }
+ }
+ }
+
+	dev_info(dev, "NPCM7xx BIOS POST code probed\n");
+
+	return 0;
+}
+
+static int npcm7xx_bpc_remove(struct platform_device *pdev)
+{
+ struct npcm7xx_bpc *lpc_bpc = dev_get_drvdata(&pdev->dev);
+ u8 reg_en;
+
+ reg_en = ioread8(lpc_bpc->base + NPCM7XX_BPCFEN_REG);
+
+ if (reg_en & FIFO_IOADDR1_ENABLE)
+ npcm7xx_disable_bpc(lpc_bpc, 0);
+ if (reg_en & FIFO_IOADDR2_ENABLE)
+ npcm7xx_disable_bpc(lpc_bpc, 1);
+
+ return 0;
+}
+
+static const struct of_device_id npcm7xx_bpc_match[] = {
+ { .compatible = "nuvoton,npcm750-lpc-bpc" },
+ { },
+};
+
+static struct platform_driver npcm7xx_bpc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = npcm7xx_bpc_match,
+ },
+ .probe = npcm7xx_bpc_probe,
+ .remove = npcm7xx_bpc_remove,
+};
+
+module_platform_driver(npcm7xx_bpc_driver);
+
+MODULE_DEVICE_TABLE(of, npcm7xx_bpc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_DESCRIPTION("Linux driver to control NPCM7XX LPC BIOS post code monitoring");
diff --git a/drivers/misc/npcm7xx-pci-mbox.c b/drivers/misc/npcm7xx-pci-mbox.c
new file mode 100644
index 000000000000..1a80661a4296
--- /dev/null
+++ b/drivers/misc/npcm7xx-pci-mbox.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2018 Nuvoton Technology corporation.
+
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define DEVICE_NAME "npcm7xx-pci-mbox"
+
+#define NPCM7XX_MBOX_BMBXSTAT 0x0
+#define NPCM7XX_MBOX_BMBXCTL 0x4
+#define NPCM7XX_MBOX_BMBXCMD 0x8
+
+#define NPCM7XX_MBOX_CIF_0 BIT(0)
+#define NPCM7XX_MBOX_CIE_0 BIT(0)
+#define NPCM7XX_MBOX_HIF_0 BIT(0)
+
+#define NPCM7XX_MBOX_ALL_CIF GENMASK(7, 0)
+#define NPCM7XX_MBOX_ALL_CIE GENMASK(7, 0)
+#define NPCM7XX_MBOX_ALL_HIF GENMASK(7, 0)
+
+struct npcm7xx_mbox {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+ void __iomem *memory;
+ wait_queue_head_t queue;
+	spinlock_t lock; /* protects mailbox window access */
+ bool cif0;
+ u32 max_buf_size;
+};
+
+static atomic_t npcm7xx_mbox_open_count = ATOMIC_INIT(0);
+
+static struct npcm7xx_mbox *file_mbox(struct file *file)
+{
+ return container_of(file->private_data, struct npcm7xx_mbox, miscdev);
+}
+
+static int npcm7xx_mbox_open(struct inode *inode, struct file *file)
+{
+ struct npcm7xx_mbox *mbox = file_mbox(file);
+
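+	/* Single opener only; enable the core interrupt on first open */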
+ if (atomic_inc_return(&npcm7xx_mbox_open_count) == 1) {
+ /* enable mailbox interrupt */
+ regmap_update_bits(mbox->regmap, NPCM7XX_MBOX_BMBXCTL,
+ NPCM7XX_MBOX_ALL_CIE, NPCM7XX_MBOX_CIE_0);
+ return 0;
+ }
+
+ atomic_dec(&npcm7xx_mbox_open_count);
+ return -EBUSY;
+}
+
+static ssize_t npcm7xx_mbox_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct npcm7xx_mbox *mbox = file_mbox(file);
+ unsigned long flags;
+
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+ if ((*ppos + count) > mbox->max_buf_size)
+ return -EINVAL;
+
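+	/* Wait for the host to raise CIF0, i.e. fresh data in the window */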
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mbox->cif0)
+ return -EAGAIN;
+ } else if (wait_event_interruptible(mbox->queue, mbox->cif0)) {
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irqsave(&mbox->lock, flags);
+
+ if (copy_to_user((void __user *)buf,
+ (const void *)(mbox->memory + *ppos), count)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return -EFAULT;
+ }
+
+ mbox->cif0 = false;
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return count;
+}
+
+static ssize_t npcm7xx_mbox_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct npcm7xx_mbox *mbox = file_mbox(file);
+ unsigned long flags;
+
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+ if ((*ppos + count) > mbox->max_buf_size)
+ return -EINVAL;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+
+ if (copy_from_user((void *)(mbox->memory + *ppos),
+ (void __user *)buf, count)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return -EFAULT;
+ }
+
+ regmap_update_bits(mbox->regmap, NPCM7XX_MBOX_BMBXCMD,
+ NPCM7XX_MBOX_ALL_HIF, NPCM7XX_MBOX_HIF_0);
+
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return count;
+}
+
+static __poll_t npcm7xx_mbox_poll(struct file *file, poll_table *wait)
+{
+	struct npcm7xx_mbox *mbox = file_mbox(file);
+	__poll_t mask = 0;
+
+ poll_wait(file, &mbox->queue, wait);
+ if (mbox->cif0)
+		mask |= EPOLLIN;
+
+ return mask;
+}
+
+static int npcm7xx_mbox_release(struct inode *inode, struct file *file)
+{
+ atomic_dec(&npcm7xx_mbox_open_count);
+ return 0;
+}
+
+static const struct file_operations npcm7xx_mbox_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_seek_end_llseek,
+ .read = npcm7xx_mbox_read,
+ .write = npcm7xx_mbox_write,
+ .open = npcm7xx_mbox_open,
+ .release = npcm7xx_mbox_release,
+ .poll = npcm7xx_mbox_poll,
+};
+
+static irqreturn_t npcm7xx_mbox_irq(int irq, void *arg)
+{
+ struct npcm7xx_mbox *mbox = arg;
+ u32 val;
+
+ regmap_read(mbox->regmap, NPCM7XX_MBOX_BMBXSTAT, &val);
+ if ((val & NPCM7XX_MBOX_CIF_0) != NPCM7XX_MBOX_CIF_0)
+ return IRQ_NONE;
+
+ /*
+ * Leave the status bit set so that we know the data is for us,
+ * clear it once it has been read.
+ */
+ mbox->cif0 = true;
+
+	/* Mask it off; we'll clear it when the data gets read */
+ regmap_write_bits(mbox->regmap, NPCM7XX_MBOX_BMBXSTAT,
+ NPCM7XX_MBOX_ALL_CIF, NPCM7XX_MBOX_CIF_0);
+
+ wake_up(&mbox->queue);
+
+ return IRQ_HANDLED;
+}
+
+static int npcm7xx_mbox_config_irq(struct npcm7xx_mbox *mbox,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc, irq;
+ u32 val;
+
+ /* Disable all register based interrupts */
+ regmap_update_bits(mbox->regmap, NPCM7XX_MBOX_BMBXCTL,
+ NPCM7XX_MBOX_ALL_CIE, 0);
+	/*
+	 * These registers are write-one-to-clear and, per the spec,
+	 * already-cleared bits should not be written again. Read the
+	 * status and clear only the bits that are currently set.
+	 */
+ regmap_read(mbox->regmap, NPCM7XX_MBOX_BMBXSTAT, &val);
+ val &= NPCM7XX_MBOX_ALL_CIF;
+
+ /* If any bit is set, write back to clear */
+ if (val)
+ regmap_write_bits(mbox->regmap, NPCM7XX_MBOX_BMBXSTAT,
+ NPCM7XX_MBOX_ALL_CIF, val);
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!irq)
+ return -ENODEV;
+
+ rc = devm_request_irq(dev, irq, npcm7xx_mbox_irq, 0, DEVICE_NAME, mbox);
+ if (rc < 0) {
+ dev_err(dev, "Unable to request IRQ %d\n", irq);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int npcm7xx_mbox_probe(struct platform_device *pdev)
+{
+ struct npcm7xx_mbox *mbox;
+ struct device *dev;
+ struct resource *res;
+ int rc;
+
+ dev = &pdev->dev;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, mbox);
+
+ mbox->regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(mbox->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mbox->memory = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->memory))
+ return PTR_ERR(mbox->memory);
+ mbox->max_buf_size = resource_size(res);
+
+ spin_lock_init(&mbox->lock);
+ init_waitqueue_head(&mbox->queue);
+
+ mbox->miscdev.minor = MISC_DYNAMIC_MINOR;
+ mbox->miscdev.name = DEVICE_NAME;
+ mbox->miscdev.fops = &npcm7xx_mbox_fops;
+ mbox->miscdev.parent = dev;
+ mbox->cif0 = false;
+ rc = misc_register(&mbox->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register device\n");
+ return rc;
+ }
+
+ rc = npcm7xx_mbox_config_irq(mbox, pdev);
+ if (rc) {
+ dev_err(dev, "Failed to configure IRQ\n");
+ misc_deregister(&mbox->miscdev);
+ return rc;
+ }
+
+	dev_info(dev, "NPCM7xx PCI mailbox probed\n");
+
+ return 0;
+}
+
+static int npcm7xx_mbox_remove(struct platform_device *pdev)
+{
+ struct npcm7xx_mbox *mbox = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&mbox->miscdev);
+
+ return 0;
+}
+
+static const struct of_device_id npcm7xx_mbox_match[] = {
+ { .compatible = "nuvoton,npcm750-pci-mbox" },
+ { },
+};
+
+static struct platform_driver npcm7xx_mbox_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = npcm7xx_mbox_match,
+ },
+ .probe = npcm7xx_mbox_probe,
+ .remove = npcm7xx_mbox_remove,
+};
+
+module_platform_driver(npcm7xx_mbox_driver);
+
+MODULE_DEVICE_TABLE(of, npcm7xx_mbox_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_DESCRIPTION("NPCM7XX mailbox device driver");
diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
index ae85e4c0e114..8daf423d02de 100644
--- a/drivers/mtd/spi-nor/controllers/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -6,6 +6,7 @@
*/
#include <linux/bug.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -16,10 +17,13 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/sizes.h>
+#include <linux/slab.h>
#include <linux/sysfs.h>
#define DEVICE_NAME "aspeed-smc"
+#define SNOR_F_4B_OPCODES BIT(6)
+
/*
* The driver only support SPI flash
*/
@@ -30,6 +34,7 @@ enum aspeed_smc_flash_type {
};
struct aspeed_smc_chip;
+struct aspeed_smc_controller;
struct aspeed_smc_info {
u32 maxsize; /* maximum size of chip window */
@@ -37,12 +42,34 @@ struct aspeed_smc_info {
bool hastype; /* flash type field exists in config reg */
u8 we0; /* shift for write enable bit for CE0 */
u8 ctl0; /* offset in regs of ctl for CE0 */
+ u8 timing; /* offset in regs of timing */
+ u32 hclk_mask; /* clock frequency mask in CEx Control reg */
+ u32 hdiv_max; /* Max HCLK divisor on read timing reg */
void (*set_4b)(struct aspeed_smc_chip *chip);
+ int (*optimize_read)(struct aspeed_smc_chip *chip, u32 max_freq);
+ int (*calibrate)(struct aspeed_smc_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf);
+
+ u32 (*segment_start)(struct aspeed_smc_controller *controller, u32 reg);
+ u32 (*segment_end)(struct aspeed_smc_controller *controller, u32 reg);
+ u32 (*segment_reg)(struct aspeed_smc_controller *controller,
+ u32 start, u32 end);
};
static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip);
static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip);
+static int aspeed_smc_optimize_read(struct aspeed_smc_chip *chip,
+ u32 max_freq);
+static int aspeed_smc_calibrate_reads(struct aspeed_smc_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf);
+
+static u32 aspeed_smc_segment_start(
+ struct aspeed_smc_controller *controller, u32 reg);
+static u32 aspeed_smc_segment_end(
+ struct aspeed_smc_controller *controller, u32 reg);
+static u32 aspeed_smc_segment_reg(
+ struct aspeed_smc_controller *controller, u32 start, u32 end);
static const struct aspeed_smc_info fmc_2400_info = {
.maxsize = 64 * 1024 * 1024,
@@ -50,7 +77,15 @@ static const struct aspeed_smc_info fmc_2400_info = {
.hastype = true,
.we0 = 16,
.ctl0 = 0x10,
+ .timing = 0x94,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
.set_4b = aspeed_smc_chip_set_4b,
+ .optimize_read = aspeed_smc_optimize_read,
+ .calibrate = aspeed_smc_calibrate_reads,
+ .segment_start = aspeed_smc_segment_start,
+ .segment_end = aspeed_smc_segment_end,
+ .segment_reg = aspeed_smc_segment_reg,
};
static const struct aspeed_smc_info spi_2400_info = {
@@ -59,7 +94,13 @@ static const struct aspeed_smc_info spi_2400_info = {
.hastype = false,
.we0 = 0,
.ctl0 = 0x04,
+ .timing = 0x14,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
.set_4b = aspeed_smc_chip_set_4b_spi_2400,
+ .optimize_read = aspeed_smc_optimize_read,
+ .calibrate = aspeed_smc_calibrate_reads,
+ /* No segment registers */
};
static const struct aspeed_smc_info fmc_2500_info = {
@@ -68,7 +109,15 @@ static const struct aspeed_smc_info fmc_2500_info = {
.hastype = true,
.we0 = 16,
.ctl0 = 0x10,
+ .timing = 0x94,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
.set_4b = aspeed_smc_chip_set_4b,
+ .optimize_read = aspeed_smc_optimize_read,
+ .calibrate = aspeed_smc_calibrate_reads,
+ .segment_start = aspeed_smc_segment_start,
+ .segment_end = aspeed_smc_segment_end,
+ .segment_reg = aspeed_smc_segment_reg,
};
static const struct aspeed_smc_info spi_2500_info = {
@@ -77,7 +126,59 @@ static const struct aspeed_smc_info spi_2500_info = {
.hastype = false,
.we0 = 16,
.ctl0 = 0x10,
+ .timing = 0x94,
+ .hclk_mask = 0xfffff0ff,
+ .hdiv_max = 1,
+ .set_4b = aspeed_smc_chip_set_4b,
+ .optimize_read = aspeed_smc_optimize_read,
+ .calibrate = aspeed_smc_calibrate_reads,
+ .segment_start = aspeed_smc_segment_start,
+ .segment_end = aspeed_smc_segment_end,
+ .segment_reg = aspeed_smc_segment_reg,
+};
+
+static u32 aspeed_smc_segment_start_ast2600(
+ struct aspeed_smc_controller *controller, u32 reg);
+static u32 aspeed_smc_segment_end_ast2600(
+ struct aspeed_smc_controller *controller, u32 reg);
+static u32 aspeed_smc_segment_reg_ast2600(
+ struct aspeed_smc_controller *controller, u32 start, u32 end);
+
+static int aspeed_smc_calibrate_reads_ast2600(struct aspeed_smc_chip *chip,
+ u32 hdiv, const u8 *golden_buf, u8 *test_buf);
+
+static const struct aspeed_smc_info fmc_2600_info = {
+ .maxsize = 256 * 1024 * 1024,
+ .nce = 3,
+ .hastype = false, /* SPI Only */
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .timing = 0x94,
+ .hclk_mask = 0xf0fff0ff,
+ .hdiv_max = 2,
+ .set_4b = aspeed_smc_chip_set_4b,
+ .optimize_read = aspeed_smc_optimize_read,
+ .calibrate = aspeed_smc_calibrate_reads_ast2600,
+ .segment_start = aspeed_smc_segment_start_ast2600,
+ .segment_end = aspeed_smc_segment_end_ast2600,
+ .segment_reg = aspeed_smc_segment_reg_ast2600,
+};
+
+static const struct aspeed_smc_info spi_2600_info = {
+ .maxsize = 256 * 1024 * 1024,
+ .nce = 2,
+ .hastype = false,
+ .we0 = 16,
+ .ctl0 = 0x10,
+ .timing = 0x94,
+ .hclk_mask = 0xf0fff0ff,
+ .hdiv_max = 2,
.set_4b = aspeed_smc_chip_set_4b,
+ .optimize_read = aspeed_smc_optimize_read,
+ .calibrate = aspeed_smc_calibrate_reads_ast2600,
+ .segment_start = aspeed_smc_segment_start_ast2600,
+ .segment_end = aspeed_smc_segment_end_ast2600,
+ .segment_reg = aspeed_smc_segment_reg_ast2600,
};
enum aspeed_smc_ctl_reg_value {
@@ -98,6 +199,7 @@ struct aspeed_smc_chip {
u32 ctl_val[smc_max]; /* control settings */
enum aspeed_smc_flash_type type; /* what type of flash */
struct spi_nor nor;
+ u32 clk_rate;
};
struct aspeed_smc_controller {
@@ -106,12 +208,17 @@ struct aspeed_smc_controller {
struct mutex mutex; /* controller access mutex */
const struct aspeed_smc_info *info; /* type info of controller */
void __iomem *regs; /* controller registers */
- void __iomem *ahb_base; /* per-chip windows resource */
+ void __iomem *ahb_base; /* per-chip window resource */
+ u32 ahb_base_phy; /* phys addr of AHB window */
u32 ahb_window_size; /* full mapping window size */
+ unsigned long clk_frequency;
+
struct aspeed_smc_chip *chips[]; /* pointers to attached chips */
};
+#define ASPEED_SPI_DEFAULT_FREQ 50000000
+
/*
* SPI Flash Configuration Register (AST2500 SPI)
* or
@@ -181,23 +288,82 @@ struct aspeed_smc_controller {
(CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \
CONTROL_CLOCK_FREQ_SEL_MASK | CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3)
-/*
- * The Segment Register uses a 8MB unit to encode the start address
- * and the end address of the mapping window of a flash SPI slave :
- *
- * | byte 1 | byte 2 | byte 3 | byte 4 |
- * +--------+--------+--------+--------+
- * | end | start | 0 | 0 |
- */
#define SEGMENT_ADDR_REG0 0x30
-#define SEGMENT_ADDR_START(_r) ((((_r) >> 16) & 0xFF) << 23)
-#define SEGMENT_ADDR_END(_r) ((((_r) >> 24) & 0xFF) << 23)
-#define SEGMENT_ADDR_VALUE(start, end) \
- (((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24))
#define SEGMENT_ADDR_REG(controller, cs) \
((controller)->regs + SEGMENT_ADDR_REG0 + (cs) * 4)
/*
+ * The Segment Registers of the AST2400 and AST2500 have a 8MB
+ * unit. The address range of a flash SPI slave is encoded with
+ * absolute addresses which should be part of the overall controller
+ * window.
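+ *
+ * For example (illustrative values only): a window covering
+ * 0x20000000 - 0x22000000 encodes as
+ * ((0x20000000 >> 23) << 16) | ((0x22000000 >> 23) << 24) = 0x44400000.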
+ */
+static u32 aspeed_smc_segment_start(
+ struct aspeed_smc_controller *controller, u32 reg)
+{
+ return ((reg >> 16) & 0xFF) << 23;
+}
+
+static u32 aspeed_smc_segment_end(
+ struct aspeed_smc_controller *controller, u32 reg)
+{
+ return ((reg >> 24) & 0xFF) << 23;
+}
+
+static u32 aspeed_smc_segment_reg(
+ struct aspeed_smc_controller *controller, u32 start, u32 end)
+{
+ return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24);
+}
+
+/*
+ * The Segment Registers of the AST2600 have a 1MB unit. The address
+ * range of a flash SPI slave is encoded with offsets in the overall
+ * controller window. The previous SoC AST2400 and AST2500 used
+ * absolute addresses. Only bits [27:20] are relevant and the end
+ * address is an upper bound limit.
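+ *
+ * For example (illustrative values only): a 64MB window at offset
+ * 0x04000000 encodes as ((0x04000000 & 0x0ff00000) >> 16) |
+ * ((0x08000000 - 1) & 0x0ff00000) = 0x07f00400.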
+ */
+
+#define AST2600_SEG_ADDR_MASK 0x0ff00000
+
+static u32 aspeed_smc_segment_start_ast2600(
+ struct aspeed_smc_controller *controller, u32 reg)
+{
+	u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK;
+
+ return controller->ahb_base_phy + start_offset;
+}
+
+static u32 aspeed_smc_segment_end_ast2600(
+ struct aspeed_smc_controller *controller, u32 reg)
+{
+	u32 end_offset = reg & AST2600_SEG_ADDR_MASK;
+
+ /* segment is disabled */
+ if (!end_offset)
+ return controller->ahb_base_phy;
+
+ return controller->ahb_base_phy + end_offset + 0x100000;
+}
+
+static u32 aspeed_smc_segment_reg_ast2600(
+ struct aspeed_smc_controller *controller, u32 start, u32 end)
+{
+ /* disable zero size segments */
+ if (start == end)
+ return 0;
+
+ return ((start & AST2600_SEG_ADDR_MASK) >> 16) |
+ ((end - 1) & AST2600_SEG_ADDR_MASK);
+}
+
+/*
+ * Switch to turn off read optimisation if needed
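+ * (e.g. via /sys/module/aspeed_smc/parameters/optimize_read; path
+ * assumes the default module name)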
+ */
+static bool optimize_read = true;
+module_param(optimize_read, bool, 0644);
+
+/*
* In user mode all data bytes read or written to the chip decode address
* range are transferred to or from the SPI bus. The range is treated as a
 * fifo of arbitrary 1, 2, or 4 byte width but each write has to be aligned
@@ -370,18 +536,49 @@ static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr)
}
}
+static int aspeed_smc_get_io_mode(struct aspeed_smc_chip *chip)
+{
+ switch (chip->nor.read_proto) {
+ case SNOR_PROTO_1_1_1:
+ return 0;
+ case SNOR_PROTO_1_1_2:
+ return CONTROL_IO_DUAL_DATA;
+ case SNOR_PROTO_1_2_2:
+ return CONTROL_IO_DUAL_ADDR_DATA;
+ default:
+ dev_err(chip->nor.dev, "unsupported SPI read mode\n");
+ return -EINVAL;
+ }
+}
+
+static void aspeed_smc_set_io_mode(struct aspeed_smc_chip *chip, u32 io_mode)
+{
+ u32 ctl;
+
+ if (io_mode > 0) {
+ ctl = readl(chip->ctl) & ~CONTROL_IO_MODE_MASK;
+ ctl |= io_mode;
+ writel(ctl, chip->ctl);
+ }
+}
+
static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from,
size_t len, u_char *read_buf)
{
struct aspeed_smc_chip *chip = nor->priv;
int i;
u8 dummy = 0xFF;
+ int io_mode = aspeed_smc_get_io_mode(chip);
aspeed_smc_start_user(nor);
aspeed_smc_send_cmd_addr(nor, nor->read_opcode, from);
for (i = 0; i < chip->nor.read_dummy / 8; i++)
aspeed_smc_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
+ /* Set IO mode only for data */
+ if (io_mode == CONTROL_IO_DUAL_DATA)
+ aspeed_smc_set_io_mode(chip, io_mode);
+
aspeed_smc_read_from_ahb(read_buf, chip->ahb_base, len);
aspeed_smc_stop_user(nor);
return len;
@@ -399,6 +596,31 @@ static ssize_t aspeed_smc_write_user(struct spi_nor *nor, loff_t to,
return len;
}
+static ssize_t aspeed_smc_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct aspeed_smc_chip *chip = nor->priv;
+
+ /*
+ * The AHB window configured for the chip is too small for the
+ * read offset. Use the "User mode" of the controller to
+ * perform the read.
+ */
+ if (from >= chip->ahb_window_size) {
+ aspeed_smc_read_user(nor, from, len, read_buf);
+ goto out;
+ }
+
+ /*
+ * Use the "Command mode" to do a direct read from the AHB
+ * window configured for the chip. This should be the default.
+ */
+ memcpy_fromio(read_buf, chip->ahb_base + from, len);
+
+out:
+ return len;
+}
+
static int aspeed_smc_unregister(struct aspeed_smc_controller *controller)
{
struct aspeed_smc_chip *chip;
@@ -423,6 +645,8 @@ static const struct of_device_id aspeed_smc_matches[] = {
{ .compatible = "aspeed,ast2400-spi", .data = &spi_2400_info },
{ .compatible = "aspeed,ast2500-fmc", .data = &fmc_2500_info },
{ .compatible = "aspeed,ast2500-spi", .data = &spi_2500_info },
+ { .compatible = "aspeed,ast2600-fmc", .data = &fmc_2600_info },
+ { .compatible = "aspeed,ast2600-spi", .data = &spi_2600_info },
{ }
};
MODULE_DEVICE_TABLE(of, aspeed_smc_matches);
@@ -438,36 +662,32 @@ static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip,
struct resource *res)
{
struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
u32 offset = 0;
u32 reg;
- if (controller->info->nce > 1) {
+ if (info->nce > 1) {
reg = readl(SEGMENT_ADDR_REG(controller, chip->cs));
- if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg))
+ if (info->segment_start(controller, reg) >=
+ info->segment_end(controller, reg)) {
return NULL;
+ }
- offset = SEGMENT_ADDR_START(reg) - res->start;
+ offset = info->segment_start(controller, reg) - res->start;
}
return controller->ahb_base + offset;
}
-static u32 aspeed_smc_ahb_base_phy(struct aspeed_smc_controller *controller)
-{
- u32 seg0_val = readl(SEGMENT_ADDR_REG(controller, 0));
-
- return SEGMENT_ADDR_START(seg0_val);
-}
-
static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start,
u32 size)
{
struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
void __iomem *seg_reg;
- u32 seg_oldval, seg_newval, ahb_base_phy, end;
-
- ahb_base_phy = aspeed_smc_ahb_base_phy(controller);
+ u32 seg_oldval, seg_newval, end;
+ u32 ahb_base_phy = controller->ahb_base_phy;
seg_reg = SEGMENT_ADDR_REG(controller, cs);
seg_oldval = readl(seg_reg);
@@ -477,8 +697,15 @@ static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start,
* size, but take into account the possible overlap with the
* previous segment
*/
- if (!size)
- size = SEGMENT_ADDR_END(seg_oldval) - start;
+ if (!size) {
+ end = info->segment_end(controller, seg_oldval);
+
+ /*
+ * Check for disabled segment (AST2600).
+ */
+ if (end != ahb_base_phy)
+ size = end - start;
+ }
/*
* The segment cannot exceed the maximum window size of the
@@ -491,7 +718,7 @@ static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start,
}
end = start + size;
- seg_newval = SEGMENT_ADDR_VALUE(start, end);
+ seg_newval = info->segment_reg(controller, start, end);
writel(seg_newval, seg_reg);
/*
@@ -502,13 +729,13 @@ static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start,
if (seg_newval != readl(seg_reg)) {
dev_err(chip->nor.dev, "CE%d window invalid", cs);
writel(seg_oldval, seg_reg);
- start = SEGMENT_ADDR_START(seg_oldval);
- end = SEGMENT_ADDR_END(seg_oldval);
+ start = info->segment_start(controller, seg_oldval);
+ end = info->segment_end(controller, seg_oldval);
size = end - start;
}
- dev_info(chip->nor.dev, "CE%d window [ 0x%.8x - 0x%.8x ] %dMB",
- cs, start, end, size >> 20);
+ dev_info(chip->nor.dev, "CE%d window [ 0x%.8x - 0x%.8x ] %dMB%s",
+ cs, start, end, size >> 20, size ? "" : " (disabled)");
return size;
}
@@ -556,7 +783,7 @@ static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip)
chip->cs, size >> 20);
}
- ahb_base_phy = aspeed_smc_ahb_base_phy(controller);
+ ahb_base_phy = controller->ahb_base_phy;
/*
* As a start address for the current segment, use the default
@@ -566,7 +793,7 @@ static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip)
if (chip->cs) {
u32 prev = readl(SEGMENT_ADDR_REG(controller, chip->cs - 1));
- start = SEGMENT_ADDR_END(prev);
+ start = controller->info->segment_end(controller, prev);
} else {
start = ahb_base_phy;
}
@@ -703,10 +930,258 @@ static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip,
return 0;
}
+
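+/* Amount of flash data sampled when checking each read timing setting */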
+#define CALIBRATE_BUF_SIZE 16384
+
+static bool aspeed_smc_check_reads(struct aspeed_smc_chip *chip,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
+ if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0)
+ return false;
+ }
+ return true;
+}
+
+static int aspeed_smc_calibrate_reads(struct aspeed_smc_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
+ int i;
+ int good_pass = -1, pass_count = 0;
+ u32 shift = (hdiv - 1) << 2;
+ u32 mask = ~(0xfu << shift);
+ u32 fread_timing_val = 0;
+
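+	/*
+	 * Timing field sketch, matching the dev_dbg output below:
+	 * bits [2:0] select the HCLK cycle delay (i / 2) and bit 3,
+	 * when set, adds roughly 4ns of data-input delay, so even
+	 * and odd values of i alternate the DI delay per HCLK step.
+	 */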
+#define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8))
+
+ /* Try HCLK delay 0..5, each one with/without delay and look for a
+ * good pair.
+ */
+ for (i = 0; i < 12; i++) {
+ bool pass;
+
+ fread_timing_val &= mask;
+ fread_timing_val |= FREAD_TPASS(i) << shift;
+
+ writel(fread_timing_val, controller->regs + info->timing);
+ pass = aspeed_smc_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(chip->nor.dev,
+ " * [%08x] %d HCLK delay, %dns DI delay : %s",
+			fread_timing_val, i / 2, (i & 1) ? 0 : 4,
+ pass ? "PASS" : "FAIL");
+ if (pass) {
+ pass_count++;
+ if (pass_count == 3) {
+ good_pass = i - 1;
+ break;
+ }
+		} else {
+			pass_count = 0;
+		}
+ }
+
+ /* No good setting for this frequency */
+ if (good_pass < 0)
+ return -1;
+
+ /* We have at least one pass of margin, let's use first pass */
+ fread_timing_val &= mask;
+ fread_timing_val |= FREAD_TPASS(good_pass) << shift;
+ writel(fread_timing_val, controller->regs + info->timing);
+ dev_dbg(chip->nor.dev, " * -> good is pass %d [0x%08x]",
+ good_pass, fread_timing_val);
+ return 0;
+}
+
+static bool aspeed_smc_check_calib_data(const u8 *test_buf, u32 size)
+{
+ const u32 *tb32 = (const u32 *) test_buf;
+ u32 i, cnt = 0;
+
+	/*
+	 * Check that enough words are neither all 0s nor all 1s so
+	 * the calibration can be considered valid.
+	 *
+	 * The threshold of 64 words is arbitrary for now.
+	 */
+ size >>= 2;
+ for (i = 0; i < size; i++) {
+ if (tb32[i] != 0 && tb32[i] != 0xffffffff)
+ cnt++;
+ }
+ return cnt >= 64;
+}
+
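+/*
+ * 4-bit CE Control clock-divisor field encodings, indexed by
+ * divisor - 1 (see ASPEED_SMC_HCLK_DIV below).
+ */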
+static const u32 aspeed_smc_hclk_divs[] = {
+ 0xf, /* HCLK */
+ 0x7, /* HCLK/2 */
+ 0xe, /* HCLK/3 */
+ 0x6, /* HCLK/4 */
+ 0xd, /* HCLK/5 */
+};
+#define ASPEED_SMC_HCLK_DIV(i) \
+ (aspeed_smc_hclk_divs[(i) - 1] << CONTROL_CLOCK_FREQ_SEL_SHIFT)
+
+static u32 aspeed_smc_default_read(struct aspeed_smc_chip *chip)
+{
+ /*
+ * Keep the 4Byte address mode on the AST2400 SPI controller.
+ * Other controllers set the 4Byte mode in the CE Control
+ * Register
+ */
+ u32 ctl_mask = chip->controller->info == &spi_2400_info ?
+ CONTROL_IO_ADDRESS_4B : 0;
+ u8 cmd = chip->nor.addr_width == 4 ? SPINOR_OP_READ_4B :
+ SPINOR_OP_READ;
+
+ /*
+ * Use the "read command" mode to customize the opcode. In
+ * normal command mode, the value is necessarily READ (0x3) on
+ * the AST2400/2500 SoCs.
+ */
+ return (chip->ctl_val[smc_read] & ctl_mask) |
+ (0x00 << 28) | /* Single bit */
+ (0x00 << 24) | /* CE# max */
+ (cmd << 16) | /* use read mode to support 4B opcode */
+ (0x00 << 8) | /* HCLK/16 */
+ (0x00 << 6) | /* no dummy cycle */
+ (0x01); /* read mode */
+}
+
+static int aspeed_smc_optimize_read(struct aspeed_smc_chip *chip,
+ u32 max_freq)
+{
+ struct aspeed_smc_controller *controller = chip->controller;
+ const struct aspeed_smc_info *info = controller->info;
+ u8 *golden_buf, *test_buf;
+ int i, rc, best_div = -1;
+ u32 save_read_val = chip->ctl_val[smc_read];
+ u32 ahb_freq = chip->controller->clk_frequency;
+
+ dev_dbg(chip->nor.dev, "AHB frequency: %d MHz", ahb_freq / 1000000);
+
+	test_buf = kmalloc(CALIBRATE_BUF_SIZE * 2, GFP_KERNEL);
+	if (!test_buf)
+		return -ENOMEM;
+	golden_buf = test_buf + CALIBRATE_BUF_SIZE;
+
+ /* We start with the dumbest setting (keep 4Byte bit) and read
+ * some data
+ */
+ chip->ctl_val[smc_read] = aspeed_smc_default_read(chip);
+
+ writel(chip->ctl_val[smc_read], chip->ctl);
+
+ memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
+
+ /* Establish our read mode with freq field set to 0 (HCLK/16) */
+ chip->ctl_val[smc_read] = save_read_val & info->hclk_mask;
+
+ /* Check if calibration data is suitable */
+ if (!aspeed_smc_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
+ dev_info(chip->nor.dev,
+ "Calibration area too uniform, using low speed");
+ writel(chip->ctl_val[smc_read], chip->ctl);
+ kfree(test_buf);
+ return 0;
+ }
+
+ /* Now we iterate the HCLK dividers until we find our breaking point */
+	for (i = ARRAY_SIZE(aspeed_smc_hclk_divs); i >= info->hdiv_max; i--) {
+ u32 tv, freq;
+
+ /* Compare timing to max */
+ freq = ahb_freq / i;
+ if (freq > max_freq)
+ continue;
+
+ /* Set the timing */
+ tv = chip->ctl_val[smc_read] | ASPEED_SMC_HCLK_DIV(i);
+ writel(tv, chip->ctl);
+ dev_dbg(chip->nor.dev, "Trying HCLK/%d [%08x] ...", i, tv);
+ rc = info->calibrate(chip, i, golden_buf, test_buf);
+ if (rc == 0)
+ best_div = i;
+ }
+ kfree(test_buf);
+
+ /* Nothing found ? */
+	if (best_div < 0) {
+		dev_warn(chip->nor.dev, "No good frequency, using dumb slow");
+	} else {
+ dev_dbg(chip->nor.dev, "Found good read timings at HCLK/%d",
+ best_div);
+ chip->ctl_val[smc_read] |= ASPEED_SMC_HCLK_DIV(best_div);
+ }
+
+ writel(chip->ctl_val[smc_read], chip->ctl);
+ return 0;
+}
+
+#define TIMING_DELAY_DI BIT(3)
+#define TIMING_DELAY_HCYCLE_MAX 5
+#define TIMING_REG_AST2600(chip) \
+ ((chip)->controller->regs + (chip)->controller->info->timing + \
+ (chip)->cs * 4)
+
+static int aspeed_smc_calibrate_reads_ast2600(struct aspeed_smc_chip *chip, u32 hdiv,
+ const u8 *golden_buf, u8 *test_buf)
+{
+ int hcycle;
+ u32 shift = (hdiv - 2) << 3;
+ u32 mask = ~(0xfu << shift);
+ u32 fread_timing_val = 0;
+
+ for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) {
+ int delay_ns;
+ bool pass = false;
+
+ fread_timing_val &= mask;
+ fread_timing_val |= hcycle << shift;
+
+ /* no DI input delay first */
+ writel(fread_timing_val, TIMING_REG_AST2600(chip));
+ pass = aspeed_smc_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(chip->nor.dev,
+ " * [%08x] %d HCLK delay, DI delay none : %s",
+ fread_timing_val, hcycle, pass ? "PASS" : "FAIL");
+ if (pass)
+ return 0;
+
+ /* Add DI input delays */
+ fread_timing_val &= mask;
+ fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift;
+
+ for (delay_ns = 0; delay_ns < 0x10; delay_ns++) {
+ fread_timing_val &= ~(0xf << (4 + shift));
+ fread_timing_val |= delay_ns << (4 + shift);
+
+ writel(fread_timing_val, TIMING_REG_AST2600(chip));
+ pass = aspeed_smc_check_reads(chip, golden_buf, test_buf);
+ dev_dbg(chip->nor.dev,
+ " * [%08x] %d HCLK delay, DI delay %d.%dns : %s",
+				fread_timing_val, hcycle, (delay_ns + 1) / 2,
+				(delay_ns + 1) & 1 ? 5 : 0,
+				pass ? "PASS" : "FAIL");
+ /*
+ * TODO: This is optimistic. We should look
+ * for a working interval and save the middle
+ * value in the read timing register.
+ */
+ if (pass)
+ return 0;
+ }
+ }
+
+ /* No good setting for this frequency */
+ return -1;
+}
+
static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
{
struct aspeed_smc_controller *controller = chip->controller;
const struct aspeed_smc_info *info = controller->info;
+ int io_mode;
u32 cmd;
if (chip->nor.addr_width == 4 && info->set_4b)
@@ -729,21 +1204,24 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
* TODO: Adjust clocks if fast read is supported and interpret
* SPI-NOR flags to adjust controller settings.
*/
- if (chip->nor.read_proto == SNOR_PROTO_1_1_1) {
- if (chip->nor.read_dummy == 0)
- cmd = CONTROL_COMMAND_MODE_NORMAL;
- else
- cmd = CONTROL_COMMAND_MODE_FREAD;
- } else {
- dev_err(chip->nor.dev, "unsupported SPI read mode\n");
- return -EINVAL;
- }
+ io_mode = aspeed_smc_get_io_mode(chip);
+ if (io_mode < 0)
+ return io_mode;
+
+ if (chip->nor.read_dummy == 0)
+ cmd = CONTROL_COMMAND_MODE_NORMAL;
+ else
+ cmd = CONTROL_COMMAND_MODE_FREAD;
- chip->ctl_val[smc_read] |= cmd |
+ chip->ctl_val[smc_read] |= cmd | io_mode |
+ chip->nor.read_opcode << CONTROL_COMMAND_SHIFT |
CONTROL_IO_DUMMY_SET(chip->nor.read_dummy / 8);
- dev_dbg(controller->dev, "base control register: %08x\n",
+ dev_info(controller->dev, "read control register: %08x\n",
chip->ctl_val[smc_read]);
+
+ if (optimize_read && info->optimize_read)
+ info->optimize_read(chip, chip->clk_rate);
return 0;
}
@@ -752,7 +1230,7 @@ static const struct spi_nor_controller_ops aspeed_smc_controller_ops = {
.unprepare = aspeed_smc_unprep,
.read_reg = aspeed_smc_read_reg,
.write_reg = aspeed_smc_write_reg,
- .read = aspeed_smc_read_user,
+ .read = aspeed_smc_read,
.write = aspeed_smc_write_user,
};
@@ -762,6 +1240,7 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
const struct spi_nor_hwcaps hwcaps = {
.mask = SNOR_HWCAPS_READ |
SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_READ_1_1_2 |
SNOR_HWCAPS_PP,
};
const struct aspeed_smc_info *info = controller->info;
@@ -805,6 +1284,13 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
break;
}
+	if (of_property_read_u32(child, "spi-max-frequency",
+				 &chip->clk_rate))
+		chip->clk_rate = ASPEED_SPI_DEFAULT_FREQ;
+ dev_info(dev, "Using %d MHz SPI frequency\n",
+ chip->clk_rate / 1000000);
+
chip->controller = controller;
chip->ctl = controller->regs + info->ctl0 + cs * 4;
chip->cs = cs;
@@ -856,6 +1342,7 @@ static int aspeed_smc_probe(struct platform_device *pdev)
struct aspeed_smc_controller *controller;
const struct of_device_id *match;
const struct aspeed_smc_info *info;
+ struct clk *clk;
struct resource *res;
int ret;
@@ -881,12 +1368,19 @@ static int aspeed_smc_probe(struct platform_device *pdev)
return PTR_ERR(controller->regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ controller->ahb_base_phy = res->start;
controller->ahb_base = devm_ioremap_resource(dev, res);
if (IS_ERR(controller->ahb_base))
return PTR_ERR(controller->ahb_base);
controller->ahb_window_size = resource_size(res);
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ controller->clk_frequency = clk_get_rate(clk);
+ devm_clk_put(&pdev->dev, clk);
+
ret = aspeed_smc_setup_flash(controller, np, res);
if (ret)
dev_err(dev, "Aspeed SMC probe failed %d\n", ret);
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index ab0f963d630c..6f3e60f61cc3 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -67,9 +67,8 @@ static const struct flash_info macronix_parts[] = {
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
+ { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 5062af10f138..425a478576cc 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -84,6 +84,8 @@ static const struct flash_info winbond_parts[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "w25q512jv", INFO(0xef4020, 0, 64 * 1024, 1024,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
};
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4ded81b27d0a..b986c05e36f5 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -139,6 +139,7 @@ source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
+source "drivers/net/ethernet/nuvoton/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
source "drivers/net/ethernet/oki-semi/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index f8f38dcb5f8a..c4c3b715c101 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
obj-$(CONFIG_NET_VENDOR_NI) += ni/
+obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
obj-$(CONFIG_LPC_ENET) += nxp/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig
new file mode 100644
index 000000000000..e79af5f0ba3c
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Nuvoton network device configuration
+#
+
+config NET_VENDOR_NUVOTON
+ bool "Nuvoton devices"
+ default y
+ depends on ARM && (ARCH_W90X900 || ARCH_NPCM7XX)
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+if NET_VENDOR_NUVOTON
+
+config NPCM7XX_EMC_ETH
+ bool "Nuvoton NPCM7XX Ethernet EMC"
+ depends on ARM && ARCH_NPCM7XX
+ select PHYLIB
+ select MII
+ help
+	  Say Y here if you want to use the Ethernet MAC controller (EMC)
+	  built into the NPCM750 SoC.
+
+config NPCM7XX_EMC_ETH_DEBUG
+ bool "Nuvoton NPCM7XX Ethernet EMC debug"
+ depends on NPCM7XX_EMC_ETH
+ help
+	  Say Y here if you want extra debug output from the EMC driver,
+	  including register and descriptor dumps exposed via debugfs.
+
+endif # NET_VENDOR_NUVOTON
diff --git a/drivers/net/ethernet/nuvoton/Makefile b/drivers/net/ethernet/nuvoton/Makefile
new file mode 100644
index 000000000000..3811daa84be8
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Nuvoton network device drivers.
+#
+
+obj-$(CONFIG_NPCM7XX_EMC_ETH) += npcm7xx_emc.o
diff --git a/drivers/net/ethernet/nuvoton/npcm7xx_emc.c b/drivers/net/ethernet/nuvoton/npcm7xx_emc.c
new file mode 100644
index 000000000000..9872da33fa5d
--- /dev/null
+++ b/drivers/net/ethernet/nuvoton/npcm7xx_emc.c
@@ -0,0 +1,2090 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2014-2019 Nuvoton Technology corporation.
+
+#ifdef CONFIG_NPCM7XX_EMC_ETH_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+
+#include <linux/clk.h>
+
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include <linux/if_ether.h>
+
+#include <net/ip.h>
+#include <net/ncsi.h>
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *npcm7xx_fs_dir;
+#endif
+
+#define MFSEL1_OFFSET 0x00C
+#define MFSEL3_OFFSET 0x064
+#define INTCR_OFFSET 0x03C
+
+#define IPSRST1_OFFSET 0x020
+
+#define DRV_MODULE_NAME "npcm7xx-emc"
+#define DRV_MODULE_VERSION "3.90"
+
+/* Ethernet MAC Registers */
+#define REG_CAMCMR 0x00
+#define REG_CAMEN 0x04
+#define REG_CAMM_BASE 0x08
+#define REG_CAML_BASE 0x0c
+#define REG_TXDLSA 0x88
+#define REG_RXDLSA 0x8C
+#define REG_MCMDR 0x90
+#define REG_MIID 0x94
+#define REG_MIIDA 0x98
+#define REG_FFTCR 0x9C
+#define REG_TSDR 0xa0
+#define REG_RSDR 0xa4
+#define REG_DMARFC 0xa8
+#define REG_MIEN 0xac
+#define REG_MISTA 0xb0
+#define REG_MGSTA 0xb4
+#define REG_MPCNT 0xb8
+#define REG_MRPC 0xbc
+#define REG_MRPCC 0xc0
+#define REG_MREPC 0xc4
+#define REG_DMARFS 0xc8
+#define REG_CTXDSA 0xcc
+#define REG_CTXBSA 0xd0
+#define REG_CRXDSA 0xd4
+#define REG_CRXBSA 0xd8
+
+/* EMC Diagnostic Registers */
+#define REG_RXFSM 0x200
+#define REG_TXFSM 0x204
+#define REG_FSM0 0x208
+#define REG_FSM1 0x20c
+#define REG_DCR 0x210
+#define REG_DMMIR 0x214
+#define REG_BISTR 0x300
+
+/* mac controller bit */
+#define MCMDR_RXON BIT(0)
+#define MCMDR_ALP BIT(1)
+#define MCMDR_ACP BIT(3)
+#define MCMDR_SPCRC BIT(5)
+#define MCMDR_TXON BIT(8)
+#define MCMDR_NDEF BIT(9)
+#define MCMDR_FDUP BIT(18)
+#define MCMDR_ENMDC BIT(19)
+#define MCMDR_OPMOD BIT(20)
+#define SWR BIT(24)
+
+/* CAM command register */
+#define CAMCMR_AUP BIT(0)
+#define CAMCMR_AMP BIT(1)
+#define CAMCMR_ABP BIT(2)
+#define CAMCMR_CCAM BIT(3)
+#define CAMCMR_ECMP BIT(4)
+
+/* CAM enable register */
+#define CAM0EN BIT(0)
+
+/* mac mii controller bit */
+#define PHYAD BIT(8)
+#define PHYWR BIT(16)
+#define PHYBUSY BIT(17)
+#define PHYPRESP BIT(18)
+#define MDCON BIT(19)
+#define CAM_ENTRY_SIZE 0x08
+
+/* rx and tx status */
+#define TXDS_TXCP BIT(19)
+#define RXDS_CRCE BIT(17)
+#define RXDS_PTLE BIT(19)
+#define RXDS_RXGD BIT(20)
+#define RXDS_ALIE BIT(21)
+#define RXDS_RP BIT(22)
+
+/* mac interrupt status */
+#define MISTA_RXINTR BIT(0)
+#define MISTA_CRCE BIT(1)
+#define MISTA_RXOV BIT(2)
+#define MISTA_PTLE BIT(3)
+#define MISTA_RXGD BIT(4)
+#define MISTA_ALIE BIT(5)
+#define MISTA_RP BIT(6)
+#define MISTA_MMP BIT(7)
+#define MISTA_DFOI BIT(8)
+#define MISTA_DENI BIT(9)
+#define MISTA_RDU BIT(10)
+#define MISTA_RXBERR BIT(11)
+#define MISTA_CFR BIT(14)
+#define MISTA_TXINTR BIT(16)
+#define MISTA_TXEMP BIT(17)
+#define MISTA_TXCP BIT(18)
+#define MISTA_EXDEF BIT(19)
+#define MISTA_NCS BIT(20)
+#define MISTA_TXABT BIT(21)
+#define MISTA_LC BIT(22)
+#define MISTA_TDU BIT(23)
+#define MISTA_TXBERR BIT(24)
+
+/* Transmit/Receive Start Demand Register */
+#define ENSTART BIT(0)
+
+#define ENRXINTR BIT(0)
+#define ENCRCE BIT(1)
+#define EMRXOV BIT(2)
+#define ENPTLE BIT(3)
+#define ENRXGD BIT(4)
+#define ENALIE BIT(5)
+#define ENRP BIT(6)
+#define ENMMP BIT(7)
+#define ENDFO BIT(8)
+#define ENDENI BIT(9)
+#define ENRDU BIT(10)
+#define ENRXBERR BIT(11)
+#define ENCFR BIT(14)
+#define ENTXINTR BIT(16)
+#define ENTXEMP BIT(17)
+#define ENTXCP BIT(18)
+#define ENTXDEF BIT(19)
+#define ENNCS BIT(20)
+#define ENTXABT BIT(21)
+#define ENLC BIT(22)
+#define ENTDU BIT(23)
+#define ENTXBERR BIT(24)
+
+/* rx and tx owner bit */
+#define RX_OWN_DMA BIT(31)
+#define TX_OWN_DMA BIT(31)
+
+/* tx frame desc controller bit */
+#define MACTXINTEN BIT(2)
+#define CRCMODE BIT(1)
+#define PADDINGMODE BIT(0)
+
+/* fftcr controller bit */
+#define RXTHD (0x03 << 0)
+#define TXTHD (0x02 << 8)
+#define BLENGTH (0x02 << 20)
+
+/* global setting for driver */
+#define RX_QUEUE_LEN 128
+#define TX_QUEUE_LEN 64
+#define MAX_RBUFF_SZ 0x600
+#define MAX_TBUFF_SZ 0x600
+#define TX_TIMEOUT 50
+#define DELAY 1000
+#define CAM0 0x0
+#define RX_POLL_SIZE 16
+
+#ifdef CONFIG_VLAN_8021Q
+#define IS_VLAN 1
+#else
+#define IS_VLAN 0
+#endif
+
+#define MAX_PACKET_SIZE (1514 + (IS_VLAN * 4))
+#define MAX_PACKET_SIZE_W_CRC (MAX_PACKET_SIZE + 4) /* 1518 */
+
+#define MHZ (1000 * 1000)
+#define MII_TIMEOUT 100
+
+struct plat_npcm7xx_emc_data {
+ char *phy_bus_name;
+ int phy_addr;
+ unsigned char mac_addr[ETH_ALEN];
+};
+
+struct npcm7xx_rxbd {
+	__le32 sl;	/* Receive Byte Count and status bits */
+	__le32 buffer;	/* Receive Buffer Starting Address */
+	__le32 reserved;
+	__le32 next;	/* Next Rx Descriptor Starting Address */
+};
+
+struct npcm7xx_txbd {
+ __le32 mode; /* Ownership bit and some other bits */
+ __le32 buffer; /* Transmit Buffer Starting Address */
+ __le32 sl; /* Transmit Byte Count and status bits */
+ __le32 next; /* Next Tx Descriptor Starting Address */
+};
+
+struct npcm7xx_ether {
+ struct sk_buff *rx_skb[RX_QUEUE_LEN];
+ struct sk_buff *tx_skb[TX_QUEUE_LEN];
+ spinlock_t lock; /* lock sk */
+ struct npcm7xx_rxbd *rdesc;
+ struct npcm7xx_txbd *tdesc;
+ dma_addr_t rdesc_phys;
+ dma_addr_t tdesc_phys;
+ struct net_device_stats stats;
+ struct platform_device *pdev;
+ struct net_device *ndev;
+ struct resource *res;
+ unsigned int msg_enable;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ struct napi_struct napi;
+ struct ncsi_dev *ncsidev;
+ bool use_ncsi;
+ void __iomem *reg;
+ int rxirq;
+ int txirq;
+ unsigned int cur_tx;
+ unsigned int cur_rx;
+ unsigned int finish_tx;
+ unsigned int pending_tx;
+ __le32 start_tx_ptr;
+ __le32 start_rx_ptr;
+ unsigned int rx_berr;
+ unsigned int rx_err;
+ unsigned int rdu;
+ unsigned int rxov;
+ __le32 camcmr;
+ unsigned int rx_stuck;
+ int link;
+ int speed;
+ int duplex;
+ int need_reset;
+ char *dump_buf;
+ struct regmap *rst_regmap;
+
+ /* debug counters */
+ unsigned int max_waiting_rx;
+ unsigned int rx_count_pool;
+ unsigned int count_xmit;
+ unsigned int rx_int_count;
+ unsigned int rx_err_count;
+ unsigned int tx_int_count;
+ unsigned int tx_tdu;
+ unsigned int tx_tdu_i;
+ unsigned int tx_cp_i;
+ unsigned int count_finish;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_status;
+ struct dentry *dbgfs_dma_cap;
+#endif
+};
+
+#if defined CONFIG_NPCM7XX_EMC_ETH_DEBUG || defined CONFIG_DEBUG_FS
+#define REG_PRINT(reg_name) {t = scnprintf(next, size, "%-10s = %08X\n", \
+ #reg_name, readl(ether->reg + reg_name)); size -= t; next += t; }
+#define DUMP_PRINT(f, x...) {t = scnprintf(next, size, f, ## x); size -= t; \
+ next += t; }
+
+static int npcm7xx_info_dump(char *buf, int count, struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct npcm7xx_txbd *txbd;
+ struct npcm7xx_rxbd *rxbd;
+ unsigned long flags;
+ unsigned int i, cur, txd_offset, rxd_offset;
+ char *next = buf;
+ unsigned int size = count;
+ int t;
+ int is_locked = spin_is_locked(&ether->lock);
+
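+	/*
+	 * The dump may be invoked from a context that already holds
+	 * the lock; only take it here when it is currently free.
+	 */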
+ if (!is_locked)
+ spin_lock_irqsave(&ether->lock, flags);
+
+ /* ------basic driver information ---- */
+ DUMP_PRINT("NPCM7XX EMC %s driver version: %s\n", dev->name,
+ DRV_MODULE_VERSION);
+
+ REG_PRINT(REG_CAMCMR);
+ REG_PRINT(REG_CAMEN);
+ REG_PRINT(REG_CAMM_BASE);
+ REG_PRINT(REG_CAML_BASE);
+ REG_PRINT(REG_TXDLSA);
+ REG_PRINT(REG_RXDLSA);
+ REG_PRINT(REG_MCMDR);
+ REG_PRINT(REG_MIID);
+ REG_PRINT(REG_MIIDA);
+ REG_PRINT(REG_FFTCR);
+ REG_PRINT(REG_TSDR);
+ REG_PRINT(REG_RSDR);
+ REG_PRINT(REG_DMARFC);
+ REG_PRINT(REG_MIEN);
+ REG_PRINT(REG_MISTA);
+ REG_PRINT(REG_MGSTA);
+ REG_PRINT(REG_MPCNT);
+ writel(0x7FFF, (ether->reg + REG_MPCNT));
+ REG_PRINT(REG_MRPC);
+ REG_PRINT(REG_MRPCC);
+ REG_PRINT(REG_MREPC);
+ REG_PRINT(REG_DMARFS);
+ REG_PRINT(REG_CTXDSA);
+ REG_PRINT(REG_CTXBSA);
+ REG_PRINT(REG_CRXDSA);
+ REG_PRINT(REG_CRXBSA);
+ REG_PRINT(REG_RXFSM);
+ REG_PRINT(REG_TXFSM);
+ REG_PRINT(REG_FSM0);
+ REG_PRINT(REG_FSM1);
+ REG_PRINT(REG_DCR);
+ REG_PRINT(REG_DMMIR);
+ REG_PRINT(REG_BISTR);
+ DUMP_PRINT("\n");
+
+ DUMP_PRINT("netif_queue %s\n\n", netif_queue_stopped(dev) ?
+ "Stopped" : "Running");
+ if (ether->rdesc)
+ DUMP_PRINT("napi is %s\n\n", test_bit(NAPI_STATE_SCHED,
+ &ether->napi.state) ?
+ "scheduled" :
+ "not scheduled");
+
+ txd_offset = (readl((ether->reg + REG_CTXDSA)) -
+ readl((ether->reg + REG_TXDLSA))) /
+ sizeof(struct npcm7xx_txbd);
+ DUMP_PRINT("TXD offset %6d\n", txd_offset);
+ DUMP_PRINT("cur_tx %6d\n", ether->cur_tx);
+ DUMP_PRINT("finish_tx %6d\n", ether->finish_tx);
+ DUMP_PRINT("pending_tx %6d\n", ether->pending_tx);
+ /* debug counters */
+ DUMP_PRINT("tx_tdu %6d\n", ether->tx_tdu);
+ ether->tx_tdu = 0;
+ DUMP_PRINT("tx_tdu_i %6d\n", ether->tx_tdu_i);
+ ether->tx_tdu_i = 0;
+ DUMP_PRINT("tx_cp_i %6d\n", ether->tx_cp_i);
+ ether->tx_cp_i = 0;
+ DUMP_PRINT("tx_int_count %6d\n", ether->tx_int_count);
+ ether->tx_int_count = 0;
+ DUMP_PRINT("count_xmit tx %6d\n", ether->count_xmit);
+ ether->count_xmit = 0;
+ DUMP_PRINT("count_finish %6d\n", ether->count_finish);
+ ether->count_finish = 0;
+ DUMP_PRINT("\n");
+
+ rxd_offset = (readl((ether->reg + REG_CRXDSA)) -
+ readl((ether->reg + REG_RXDLSA)))
+ / sizeof(struct npcm7xx_txbd);
+ DUMP_PRINT("RXD offset %6d\n", rxd_offset);
+ DUMP_PRINT("cur_rx %6d\n", ether->cur_rx);
+ DUMP_PRINT("rx_err %6d\n", ether->rx_err);
+ ether->rx_err = 0;
+ DUMP_PRINT("rx_berr %6d\n", ether->rx_berr);
+ ether->rx_berr = 0;
+ DUMP_PRINT("rx_stuck %6d\n", ether->rx_stuck);
+ ether->rx_stuck = 0;
+ DUMP_PRINT("rdu %6d\n", ether->rdu);
+ ether->rdu = 0;
+ DUMP_PRINT("rxov rx %6d\n", ether->rxov);
+ ether->rxov = 0;
+ /* debug counters */
+ DUMP_PRINT("rx_int_count %6d\n", ether->rx_int_count);
+ ether->rx_int_count = 0;
+ DUMP_PRINT("rx_err_count %6d\n", ether->rx_err_count);
+ ether->rx_err_count = 0;
+ DUMP_PRINT("rx_count_pool %6d\n", ether->rx_count_pool);
+ ether->rx_count_pool = 0;
+ DUMP_PRINT("max_waiting_rx %5d\n", ether->max_waiting_rx);
+ ether->max_waiting_rx = 0;
+ DUMP_PRINT("\n");
+ DUMP_PRINT("need_reset %5d\n", ether->need_reset);
+
+ if (ether->tdesc && ether->rdesc) {
+ cur = ether->finish_tx - 2;
+ for (i = 0; i < 3; i++) {
+ cur = (cur + 1) % TX_QUEUE_LEN;
+ txbd = (ether->tdesc + cur);
+ DUMP_PRINT("finish %3d txbd mode %08X buffer %08X sl %08X next %08X tx_skb %p\n",
+ cur, txbd->mode, txbd->buffer,
+ txbd->sl, txbd->next, ether->tx_skb[cur]);
+ }
+ DUMP_PRINT("\n");
+
+ cur = txd_offset - 2;
+ for (i = 0; i < 3; i++) {
+ cur = (cur + 1) % TX_QUEUE_LEN;
+ txbd = (ether->tdesc + cur);
+ DUMP_PRINT("txd_of %3d txbd mode %08X buffer %08X sl %08X next %08X\n",
+ cur, txbd->mode, txbd->buffer,
+ txbd->sl, txbd->next);
+ }
+ DUMP_PRINT("\n");
+
+ cur = ether->cur_tx - 63;
+ for (i = 0; i < 64; i++) {
+ cur = (cur + 1) % TX_QUEUE_LEN;
+ txbd = (ether->tdesc + cur);
+ DUMP_PRINT("cur_tx %3d txbd mode %08X buffer %08X sl %08X next %08X\n",
+ cur, txbd->mode, txbd->buffer,
+ txbd->sl, txbd->next);
+ }
+ DUMP_PRINT("\n");
+
+ cur = ether->cur_rx - 63;
+ for (i = 0; i < 64; i++) {
+ cur = (cur + 1) % RX_QUEUE_LEN;
+ rxbd = (ether->rdesc + cur);
+ DUMP_PRINT("cur_rx %3d rxbd sl %08X buffer %08X sl %08X next %08X\n",
+ cur, rxbd->sl, rxbd->buffer,
+ rxbd->reserved, rxbd->next);
+ }
+ DUMP_PRINT("\n");
+
+ cur = rxd_offset - 2;
+ for (i = 0; i < 3; i++) {
+ cur = (cur + 1) % RX_QUEUE_LEN;
+ rxbd = (ether->rdesc + cur);
+ DUMP_PRINT("rxd_of %3d rxbd sl %08X buffer %08X sl %08X next %08X\n",
+ cur, rxbd->sl, rxbd->buffer,
+ rxbd->reserved, rxbd->next);
+ }
+ DUMP_PRINT("\n");
+ }
+
+ if (!is_locked)
+ spin_unlock_irqrestore(&ether->lock, flags);
+
+ return count - size;
+}
+#endif
+
+#ifdef CONFIG_NPCM7XX_EMC_ETH_DEBUG
+static void npcm7xx_info_print(struct net_device *dev)
+{
+ char *emc_dump_buf;
+ int count;
+ struct npcm7xx_ether *ether;
+ struct platform_device *pdev;
+ const size_t print_size = 5 * PAGE_SIZE;
+
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ emc_dump_buf = kmalloc(print_size, GFP_KERNEL);
+ if (!emc_dump_buf) {
+ dev_err(&pdev->dev, "kmalloc failed\n");
+ } else {
+ char c;
+ char *tmp_buf = emc_dump_buf;
+
+ count = npcm7xx_info_dump(emc_dump_buf, print_size, dev);
+ while (count > 512) {
+ c = tmp_buf[512];
+ tmp_buf[512] = 0;
+ dev_info(&pdev->dev, "%s", tmp_buf);
+ tmp_buf += 512;
+ tmp_buf[0] = c;
+ count -= 512;
+ }
+ dev_info(&pdev->dev, "%s", tmp_buf);
+ kfree(emc_dump_buf);
+ }
+}
+#endif
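+
+/*
+ * A note on the chunking above: a single printk() line holds far less
+ * than the 5-page dump, so the buffer is emitted in 512-byte slices.
+ * The byte at each slice boundary is saved and restored so the buffer
+ * can be NUL-terminated in place without losing data.
+ */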
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static int npcm7xx_debug_show(struct seq_file *sf, void *v)
+{
+ struct net_device *dev = (struct net_device *)sf->private;
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ const size_t print_size = 5 * PAGE_SIZE;
+
+ if (!ether->dump_buf) {
+ ether->dump_buf = kmalloc(print_size, GFP_KERNEL);
+ if (!ether->dump_buf)
+			return -ENOMEM;
+ npcm7xx_info_dump(ether->dump_buf, print_size, dev);
+ }
+
+ seq_printf(sf, "%s", ether->dump_buf);
+ if (sf->count < sf->size) {
+ kfree(ether->dump_buf);
+ ether->dump_buf = NULL;
+ }
+
+ return 0;
+}
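+
+/*
+ * seq_file may invoke the show routine several times while user space
+ * reads in small pieces, so the dump is generated once and cached in
+ * ether->dump_buf above; it is freed only once the whole dump fits in
+ * the seq buffer (sf->count < sf->size), i.e. on the final pass.
+ */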
+
+static int npcm7xx_debug_show_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, npcm7xx_debug_show, inode->i_private);
+}
+
+static const struct file_operations npcm7xx_debug_show_fops = {
+ .open = npcm7xx_debug_show_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int npcm7xx_debug_reset(struct seq_file *sf, void *v)
+{
+ struct net_device *dev = (struct net_device *)sf->private;
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ unsigned long flags;
+
+ seq_puts(sf, "Ask to reset the module\n");
+ spin_lock_irqsave(&ether->lock, flags);
+ writel(0, (ether->reg + REG_MIEN));
+ spin_unlock_irqrestore(&ether->lock, flags);
+ ether->need_reset = 1;
+ napi_schedule(&ether->napi);
+
+ return 0;
+}
+
+static int npcm7xx_debug_reset_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, npcm7xx_debug_reset, inode->i_private);
+}
+
+static const struct file_operations npcm7xx_debug_reset_fops = {
+ .owner = THIS_MODULE,
+ .open = npcm7xx_debug_reset_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int npcm7xx_debug_fs(struct npcm7xx_ether *ether)
+{
+ /* Create debugfs main directory if it doesn't exist yet */
+ if (!npcm7xx_fs_dir) {
+ npcm7xx_fs_dir = debugfs_create_dir(DRV_MODULE_NAME, NULL);
+
+ if (!npcm7xx_fs_dir || IS_ERR(npcm7xx_fs_dir)) {
+ dev_err(&ether->pdev->dev, "ERROR %s, debugfs create directory failed\n",
+ DRV_MODULE_NAME);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create per netdev entries */
+ ether->dbgfs_dir = debugfs_create_dir(ether->ndev->name,
+ npcm7xx_fs_dir);
+ if (!ether->dbgfs_dir || IS_ERR(ether->dbgfs_dir)) {
+ dev_err(&ether->pdev->dev, "ERROR failed to create %s directory\n", ether->ndev->name);
+ return -ENOMEM;
+ }
+
+ /* Entry to report DMA RX/TX rings */
+ ether->dbgfs_status =
+ debugfs_create_file("status", 0444,
+ ether->dbgfs_dir, ether->ndev,
+ &npcm7xx_debug_show_fops);
+
+ if (!ether->dbgfs_status || IS_ERR(ether->dbgfs_status)) {
+ dev_err(&ether->pdev->dev, "ERROR creating \'status\' debugfs file\n");
+ debugfs_remove_recursive(ether->dbgfs_dir);
+
+ return -ENOMEM;
+ }
+
+ /* Entry to report the DMA HW features */
+ ether->dbgfs_dma_cap = debugfs_create_file("do_reset", 0444,
+ ether->dbgfs_dir,
+ ether->ndev,
+ &npcm7xx_debug_reset_fops);
+
+ if (!ether->dbgfs_dma_cap || IS_ERR(ether->dbgfs_dma_cap)) {
+ dev_err(&ether->pdev->dev, "ERROR creating stmmac \'do_reset\' debugfs file\n");
+ debugfs_remove_recursive(ether->dbgfs_dir);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+#endif
+
+static void npcm7xx_opmode(struct net_device *dev, int speed, int duplex)
+{
+ __le32 val;
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+ val = readl((ether->reg + REG_MCMDR));
+ if (speed == 100)
+ val |= MCMDR_OPMOD;
+ else
+ val &= ~MCMDR_OPMOD;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MCMDR_FDUP;
+ else
+ val &= ~MCMDR_FDUP;
+
+ writel(val, (ether->reg + REG_MCMDR));
+}
+
+static void adjust_link(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct phy_device *phydev = ether->phy_dev;
+ bool status_change = false;
+ unsigned long flags;
+
+	/* clear GPIO interrupt status which indicates PHY status change? */
+ spin_lock_irqsave(&ether->lock, flags);
+
+ if (phydev->link) {
+ if (ether->speed != phydev->speed ||
+ ether->duplex != phydev->duplex) {
+ ether->speed = phydev->speed;
+ ether->duplex = phydev->duplex;
+ status_change = true;
+ }
+ } else {
+ ether->speed = 0;
+ ether->duplex = -1;
+ }
+
+ if (phydev->link != ether->link) {
+ ether->link = phydev->link;
+ status_change = true;
+ }
+
+ spin_unlock_irqrestore(&ether->lock, flags);
+
+ if (status_change)
+ npcm7xx_opmode(dev, ether->speed, ether->duplex);
+}
+
+static void npcm7xx_write_cam(struct net_device *dev,
+ unsigned int x, unsigned char *pval)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ __le32 msw, lsw;
+
+ msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];
+
+ lsw = (pval[4] << 24) | (pval[5] << 16);
+
+ writel(lsw, (ether->reg + REG_CAML_BASE) + x * CAM_ENTRY_SIZE);
+ writel(msw, (ether->reg + REG_CAMM_BASE) + x * CAM_ENTRY_SIZE);
+ dev_dbg(&ether->pdev->dev, "REG_CAML_BASE = 0x%08X REG_CAMM_BASE = 0x%08X", lsw, msw);
+}
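+
+/*
+ * Example of the CAM encoding above: for MAC address 00:11:22:33:44:55,
+ * msw = 0x00112233 and lsw = 0x44550000, i.e. the first four octets go
+ * into the CAMMx register and the last two into the top half of the
+ * CAMLx register of entry x.
+ */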
+
+static struct sk_buff *get_new_skb(struct net_device *dev, u32 i)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct sk_buff *skb = dev_alloc_skb(roundup(MAX_PACKET_SIZE_W_CRC, 4));
+
+ if (!skb)
+ return NULL;
+
+	/* Do not uncomment the following skb_reserve(): the receive buffer
+	 * starting address must be aligned to 4 bytes, and uncommenting it
+	 * would align the buffer to 2 bytes, which is likely to halt the RX
+	 * engine and crash the kernel.
+	 * skb_reserve(skb, NET_IP_ALIGN);
+	 */
+ skb->dev = dev;
+ (ether->rdesc + i)->buffer =
+ dma_map_single(&dev->dev, skb->data,
+ roundup(MAX_PACKET_SIZE_W_CRC, 4),
+ DMA_FROM_DEVICE);
+ ether->rx_skb[i] = skb;
+
+ return skb;
+}
+
+static int npcm7xx_init_desc(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether;
+ struct npcm7xx_txbd *tdesc;
+ struct npcm7xx_rxbd *rdesc;
+ struct platform_device *pdev;
+ unsigned int i;
+
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ if (!ether->tdesc) {
+ ether->tdesc = (struct npcm7xx_txbd *)
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct npcm7xx_txbd) *
+ TX_QUEUE_LEN,
+ &ether->tdesc_phys,
+ GFP_KERNEL);
+
+ if (!ether->tdesc) {
+ dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (!ether->rdesc) {
+ ether->rdesc = (struct npcm7xx_rxbd *)
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct npcm7xx_rxbd) *
+ RX_QUEUE_LEN,
+ &ether->rdesc_phys,
+ GFP_KERNEL);
+
+ if (!ether->rdesc) {
+ dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct npcm7xx_txbd) *
+ TX_QUEUE_LEN, ether->tdesc,
+ ether->tdesc_phys);
+ ether->tdesc = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < TX_QUEUE_LEN; i++) {
+ unsigned int offset;
+
+ tdesc = (ether->tdesc + i);
+
+ if (i == TX_QUEUE_LEN - 1)
+ offset = 0;
+ else
+ offset = sizeof(struct npcm7xx_txbd) * (i + 1);
+
+ tdesc->next = ether->tdesc_phys + offset;
+		tdesc->buffer = 0;
+ tdesc->sl = 0;
+ tdesc->mode = 0;
+ }
+
+ ether->start_tx_ptr = ether->tdesc_phys;
+
+ for (i = 0; i < RX_QUEUE_LEN; i++) {
+ unsigned int offset;
+
+ rdesc = (ether->rdesc + i);
+
+ if (i == RX_QUEUE_LEN - 1)
+ offset = 0;
+ else
+ offset = sizeof(struct npcm7xx_rxbd) * (i + 1);
+
+ rdesc->next = ether->rdesc_phys + offset;
+ rdesc->sl = RX_OWN_DMA;
+
+ if (!get_new_skb(dev, i)) {
+ dev_err(&pdev->dev, "get_new_skb() failed\n");
+
+ for (; i != 0; i--) {
+ dma_unmap_single(&dev->dev, (dma_addr_t)
+ ((ether->rdesc + i)->buffer),
+ roundup(MAX_PACKET_SIZE_W_CRC,
+ 4), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(ether->rx_skb[i]);
+ ether->rx_skb[i] = NULL;
+ }
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct npcm7xx_txbd) *
+ TX_QUEUE_LEN,
+ ether->tdesc, ether->tdesc_phys);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct npcm7xx_rxbd) *
+ RX_QUEUE_LEN,
+ ether->rdesc, ether->rdesc_phys);
+
+ return -ENOMEM;
+ }
+ }
+
+ ether->start_rx_ptr = ether->rdesc_phys;
+ wmb();
+ for (i = 0; i < TX_QUEUE_LEN; i++)
+ ether->tx_skb[i] = NULL;
+
+ return 0;
+}
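+
+/*
+ * The loops above chain the descriptors into a hardware ring: each
+ * 'next' field holds the DMA (physical) address of the following
+ * descriptor and the last entry points back to the base (offset 0),
+ * so the EMC DMA engine wraps around the ring without CPU involvement.
+ */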
+
+/* This function must be called with Tx/Rx stopped */
+static void npcm7xx_free_desc(struct net_device *dev,
+ bool free_also_descriptors)
+{
+ struct sk_buff *skb;
+ u32 i;
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev = ether->pdev;
+
+ for (i = 0; i < TX_QUEUE_LEN; i++) {
+ skb = ether->tx_skb[i];
+ if (skb) {
+ dma_unmap_single(&dev->dev, (dma_addr_t)((ether->tdesc +
+ i)->buffer),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ ether->tx_skb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < RX_QUEUE_LEN; i++) {
+ skb = ether->rx_skb[i];
+ if (skb) {
+ dma_unmap_single(&dev->dev, (dma_addr_t)((ether->rdesc +
+ i)->buffer),
+ roundup(MAX_PACKET_SIZE_W_CRC, 4),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ ether->rx_skb[i] = NULL;
+ }
+ }
+
+ if (free_also_descriptors) {
+ if (ether->tdesc)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct npcm7xx_txbd) *
+ TX_QUEUE_LEN,
+ ether->tdesc, ether->tdesc_phys);
+ ether->tdesc = NULL;
+
+ if (ether->rdesc)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct npcm7xx_rxbd) *
+ RX_QUEUE_LEN,
+ ether->rdesc, ether->rdesc_phys);
+ ether->rdesc = NULL;
+ }
+}
+
+static void npcm7xx_set_fifo_threshold(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ __le32 val;
+
+ val = RXTHD | TXTHD | BLENGTH;
+ writel(val, (ether->reg + REG_FFTCR));
+}
+
+static void npcm7xx_return_default_idle(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ __le32 val;
+ __le32 saved_bits;
+
+ val = readl((ether->reg + REG_MCMDR));
+ saved_bits = val & (MCMDR_FDUP | MCMDR_OPMOD);
+ val |= SWR;
+ writel(val, (ether->reg + REG_MCMDR));
+
+	/* During the EMC reset the AHB will read 0 from all registers,
+	 * so to see whether the reset has finished we can't rely on
+	 * (ether->reg + REG_MCMDR).SWR becoming 0. Instead we read another
+	 * register whose reset value is not 0; we choose
+	 * (ether->reg + REG_FFTCR).
+	 */
+ do {
+ val = readl((ether->reg + REG_FFTCR));
+ } while (val == 0);
+
+ /*
+ * Now we can verify if (ether->reg + REG_MCMDR).SWR became
+ * 0 (probably it will be 0 on the first read).
+ */
+ do {
+ val = readl((ether->reg + REG_MCMDR));
+ } while (val & SWR);
+
+ /* restore values */
+ writel(saved_bits, (ether->reg + REG_MCMDR));
+}
+
+static void npcm7xx_enable_mac_interrupt(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ __le32 val;
+
+ val = ENRXINTR | /* Start of RX interrupts */
+ ENCRCE |
+ EMRXOV |
+ (ENPTLE * (!IS_VLAN)) | /* If we don't support VLAN we want interrupt on long packets */
+ ENRXGD |
+ ENALIE |
+ ENRP |
+ ENMMP |
+ ENDFO |
+ /* ENDENI | */ /* We don't need interrupt on DMA Early Notification */
+	      ENRDU | /* Receive Descriptor Unavailable - handled in the RX path */
+ ENRXBERR |
+ /* ENCFR | */
+ ENTXINTR | /* Start of TX interrupts */
+ ENTXEMP |
+ ENTXCP |
+ ENTXDEF |
+ ENNCS |
+ ENTXABT |
+ ENLC |
+ /* ENTDU | */ /* We don't need interrupt on Transmit Descriptor Unavailable at start of operation */
+ ENTXBERR;
+ writel(val, (ether->reg + REG_MIEN));
+}
+
+static void npcm7xx_get_and_clear_int(struct net_device *dev,
+ __le32 *val, __le32 mask)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+ *val = readl((ether->reg + REG_MISTA)) & mask;
+ writel(*val, (ether->reg + REG_MISTA));
+}
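+
+/*
+ * MISTA is handled as write-one-to-clear above: writing back exactly
+ * the bits just read acks only those interrupt sources, so an event
+ * that asserts between the readl() and the writel() is kept pending
+ * for the next pass.
+ */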
+
+static void npcm7xx_set_global_maccmd(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ __le32 val;
+
+ val = readl((ether->reg + REG_MCMDR));
+
+ val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | MCMDR_NDEF;
+ if (IS_VLAN) {
+ /*
+ * we set ALP accept long packets since VLAN packets
+ * are 4 bytes longer than 1518
+ */
+ val |= MCMDR_ALP;
+ /* limit receive length to 1522 bytes due to VLAN */
+ writel(MAX_PACKET_SIZE_W_CRC, (ether->reg + REG_DMARFC));
+ }
+ writel(val, (ether->reg + REG_MCMDR));
+}
+
+static void npcm7xx_enable_cam(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ __le32 val;
+
+ npcm7xx_write_cam(dev, CAM0, dev->dev_addr);
+
+ val = readl((ether->reg + REG_CAMEN));
+ val |= CAM0EN;
+ writel(val, (ether->reg + REG_CAMEN));
+}
+
+static void npcm7xx_set_curdest(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+ writel(ether->start_rx_ptr, (ether->reg + REG_RXDLSA));
+ writel(ether->start_tx_ptr, (ether->reg + REG_TXDLSA));
+}
+
+static void npcm7xx_ether_set_rx_mode(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether;
+ __le32 rx_mode;
+
+ ether = netdev_priv(dev);
+
+ dev_dbg(&ether->pdev->dev, "%s CAMCMR_AUP\n",
+ (dev->flags & IFF_PROMISC) ? "Set" : "Clear");
+ if (dev->flags & IFF_PROMISC)
+ rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
+ rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+ else
+ rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
+ writel(rx_mode, (ether->reg + REG_CAMCMR));
+ ether->camcmr = rx_mode;
+}
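+
+/*
+ * A sketch of the filter levels chosen above, going by the CAMCMR bit
+ * names: ECMP compares against the programmed CAM entries, ABP accepts
+ * broadcast, AMP accepts all multicast and AUP accepts all unicast, so
+ * promiscuous mode simply enables every acceptance path at once.
+ */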
+
+static void npcm7xx_reset_mac(struct net_device *dev, int need_free)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+ netif_tx_lock(dev);
+
+ /* disable RX and TX */
+ writel(readl((ether->reg + REG_MCMDR)) & ~(MCMDR_TXON | MCMDR_RXON),
+ (ether->reg + REG_MCMDR));
+
+ npcm7xx_return_default_idle(dev);
+ npcm7xx_set_fifo_threshold(dev);
+
+ if (need_free)
+ npcm7xx_free_desc(dev, false);
+
+ npcm7xx_init_desc(dev);
+
+ ether->cur_tx = 0x0;
+ ether->finish_tx = 0x0;
+ ether->pending_tx = 0x0;
+ ether->cur_rx = 0x0;
+ ether->tx_tdu = 0;
+ ether->tx_tdu_i = 0;
+ ether->tx_cp_i = 0;
+
+ npcm7xx_set_curdest(dev);
+ npcm7xx_enable_cam(dev);
+ npcm7xx_ether_set_rx_mode(dev);
+ npcm7xx_enable_mac_interrupt(dev);
+ npcm7xx_set_global_maccmd(dev);
+
+ /* enable RX and TX */
+ writel(readl((ether->reg + REG_MCMDR)) | MCMDR_TXON | MCMDR_RXON,
+ (ether->reg + REG_MCMDR));
+
+ /* trigger RX */
+ writel(ENSTART, (ether->reg + REG_RSDR));
+
+ ether->need_reset = 0;
+
+ netif_wake_queue(dev);
+ netif_tx_unlock(dev);
+}
+
+static int npcm7xx_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
+{
+ struct npcm7xx_ether *ether = bus->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MII_TIMEOUT * 100);
+
+ writel(value, (ether->reg + REG_MIID));
+ writel((phy_id << 0x08) | regnum | PHYBUSY | PHYWR,
+ (ether->reg + REG_MIIDA));
+
+ /* Wait for completion */
+ while (readl((ether->reg + REG_MIIDA)) & PHYBUSY) {
+ if (time_after(jiffies, timeout)) {
+			dev_dbg(&ether->pdev->dev, "mdio write timed out: ether->reg = %p phy_id = 0x%x REG_MIIDA = 0x%x\n",
+				ether->reg, phy_id,
+				readl((ether->reg + REG_MIIDA)));
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static int npcm7xx_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct npcm7xx_ether *ether = bus->priv;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MII_TIMEOUT * 100);
+
+ writel((phy_id << 0x08) | regnum | PHYBUSY, (ether->reg + REG_MIIDA));
+
+ /* Wait for completion */
+ while (readl((ether->reg + REG_MIIDA)) & PHYBUSY) {
+ if (time_after(jiffies, timeout)) {
+			dev_dbg(&ether->pdev->dev, "mdio read timed out: ether->reg = %p phy_id = 0x%x REG_MIIDA = 0x%x\n",
+				ether->reg, phy_id,
+				readl((ether->reg + REG_MIIDA)));
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+
+ return readl((ether->reg + REG_MIID));
+}
+
+static int npcm7xx_mdio_reset(struct mii_bus *bus)
+{
+ /* reset EMAC engine?? */
+ return 0;
+}
+
+static int npcm7xx_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr((u8 *)address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ npcm7xx_write_cam(dev, CAM0, dev->dev_addr);
+
+ return 0;
+}
+
+static int npcm7xx_ether_close(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+ npcm7xx_return_default_idle(dev);
+
+ if (ether->phy_dev)
+ phy_stop(ether->phy_dev);
+ else if (ether->use_ncsi)
+ ncsi_stop_dev(ether->ncsidev);
+
+ msleep(20);
+
+ free_irq(ether->txirq, dev);
+ free_irq(ether->rxirq, dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&ether->napi);
+
+ npcm7xx_free_desc(dev, true);
+
+ kfree(ether->dump_buf);
+ ether->dump_buf = NULL;
+
+ return 0;
+}
+
+static struct net_device_stats *npcm7xx_ether_stats(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether;
+
+ ether = netdev_priv(dev);
+ return &ether->stats;
+}
+
+static int npcm7xx_clean_tx(struct net_device *dev, bool from_xmit)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct npcm7xx_txbd *txbd;
+ struct sk_buff *s;
+ dma_addr_t cur_entry, entry;
+ __le32 sl;
+
+ if (ether->pending_tx == 0)
+		return 0;
+
+ cur_entry = readl((ether->reg + REG_CTXDSA));
+
+ /* Release old used buffers */
+ entry = ether->tdesc_phys + sizeof(struct npcm7xx_txbd) *
+ (ether->finish_tx);
+
+ while (entry != cur_entry) {
+ txbd = (ether->tdesc + ether->finish_tx);
+ s = ether->tx_skb[ether->finish_tx];
+ if (!s)
+ break;
+
+ ether->count_finish++;
+
+ dma_unmap_single(&dev->dev, txbd->buffer, s->len,
+ DMA_TO_DEVICE);
+ consume_skb(s);
+ ether->tx_skb[ether->finish_tx] = NULL;
+
+ if (++ether->finish_tx >= TX_QUEUE_LEN)
+ ether->finish_tx = 0;
+ ether->pending_tx--;
+
+ sl = txbd->sl;
+ if (sl & TXDS_TXCP) {
+ ether->stats.tx_packets++;
+ ether->stats.tx_bytes += (sl & 0xFFFF);
+ } else {
+ ether->stats.tx_errors++;
+ }
+
+ entry = ether->tdesc_phys + sizeof(struct npcm7xx_txbd) *
+ (ether->finish_tx);
+ }
+
+ if (!from_xmit && unlikely(netif_queue_stopped(dev) &&
+ (TX_QUEUE_LEN - ether->pending_tx) > 1)) {
+ netif_tx_lock(dev);
+ if (netif_queue_stopped(dev) &&
+ (TX_QUEUE_LEN - ether->pending_tx) > 1) {
+ netif_wake_queue(dev);
+ }
+ netif_tx_unlock(dev);
+ }
+
+	return 0;
+}
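+
+/*
+ * Reclaim logic in brief: REG_CTXDSA exposes the descriptor the DMA
+ * engine is currently working on, so every slot between finish_tx and
+ * that point has been transmitted and its skb can be unmapped and
+ * consumed. The early break on an empty slot appears to guard against
+ * racing an xmit that advanced cur_tx but has not yet filled the slot.
+ */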
+
+static int npcm7xx_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct npcm7xx_txbd *txbd;
+ unsigned long flags;
+
+ ether->count_xmit++;
+
+ /* Insert new buffer */
+ txbd = (ether->tdesc + ether->cur_tx);
+ txbd->buffer = dma_map_single(&dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ ether->tx_skb[ether->cur_tx] = skb;
+ if (skb->len > MAX_PACKET_SIZE)
+ dev_err(&ether->pdev->dev, "skb->len (= %d) > MAX_PACKET_SIZE (= %d)\n",
+ skb->len, MAX_PACKET_SIZE);
+
+ txbd->sl = skb->len > MAX_PACKET_SIZE ? MAX_PACKET_SIZE : skb->len;
+ dma_wmb();
+
+ txbd->mode = TX_OWN_DMA | PADDINGMODE | CRCMODE;
+ wmb();
+
+ /* trigger TX */
+ writel(ENSTART, (ether->reg + REG_TSDR));
+
+ if (++ether->cur_tx >= TX_QUEUE_LEN)
+ ether->cur_tx = 0;
+
+ spin_lock_irqsave(&ether->lock, flags);
+ ether->pending_tx++;
+
+ npcm7xx_clean_tx(dev, true);
+
+ if (ether->pending_tx >= TX_QUEUE_LEN - 1) {
+ __le32 reg_mien;
+ unsigned int index_to_wake = ether->cur_tx +
+ ((TX_QUEUE_LEN * 3) / 4);
+
+ if (index_to_wake >= TX_QUEUE_LEN)
+ index_to_wake -= TX_QUEUE_LEN;
+
+ txbd = (ether->tdesc + index_to_wake);
+ txbd->mode = TX_OWN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;
+ wmb();
+
+		/* Clear the TDU interrupt before re-enabling it */
+		writel(MISTA_TDU, (ether->reg + REG_MISTA));
+		reg_mien = readl((ether->reg + REG_MIEN));
+
+ if (reg_mien != 0)
+ /* Enable TDU interrupt */
+ writel(reg_mien | ENTDU, (ether->reg + REG_MIEN));
+
+ ether->tx_tdu++;
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&ether->lock, flags);
+
+ return 0;
+}
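+
+/*
+ * Worked example of the wake-up index above, assuming a TX_QUEUE_LEN
+ * of 32 and cur_tx == 20: index_to_wake = 20 + 24 = 44, which wraps to
+ * 12, i.e. the interrupt-enabled descriptor sits three quarters of a
+ * ring ahead, so the stopped queue is woken only after most of the
+ * backlog has drained instead of on every completion.
+ */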
+
+static irqreturn_t npcm7xx_tx_interrupt(int irq, void *dev_id)
+{
+ struct npcm7xx_ether *ether;
+ struct platform_device *pdev;
+ struct net_device *dev;
+ __le32 status;
+ unsigned long flags;
+
+ dev = dev_id;
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ npcm7xx_get_and_clear_int(dev, &status, 0xFFFF0000);
+
+ ether->tx_int_count++;
+
+ if (status & MISTA_EXDEF)
+ dev_err(&pdev->dev, "emc defer exceed interrupt status=0x%08X\n"
+ , status);
+ else if (status & MISTA_TXBERR) {
+ dev_err(&pdev->dev, "emc bus error interrupt status=0x%08X\n",
+ status);
+#ifdef CONFIG_NPCM7XX_EMC_ETH_DEBUG
+ npcm7xx_info_print(dev);
+#endif
+ spin_lock_irqsave(&ether->lock, flags);
+ writel(0, (ether->reg + REG_MIEN)); /* disable any interrupt */
+ spin_unlock_irqrestore(&ether->lock, flags);
+ ether->need_reset = 1;
+ } else if (status & ~(MISTA_TXINTR | MISTA_TXCP | MISTA_TDU))
+ dev_err(&pdev->dev, "emc other error interrupt status=0x%08X\n",
+ status);
+
+	/* on MISTA_TXCP or MISTA_TDU, disable the TDU interrupt and schedule NAPI */
+ if (status & (MISTA_TXCP | MISTA_TDU) &
+ readl((ether->reg + REG_MIEN))) {
+ __le32 reg_mien;
+
+ spin_lock_irqsave(&ether->lock, flags);
+ reg_mien = readl((ether->reg + REG_MIEN));
+ if (reg_mien & ENTDU)
+ /* Disable TDU interrupt */
+ writel(reg_mien & (~ENTDU), (ether->reg + REG_MIEN));
+
+ spin_unlock_irqrestore(&ether->lock, flags);
+
+ if (status & MISTA_TXCP)
+ ether->tx_cp_i++;
+ if (status & MISTA_TDU)
+ ether->tx_tdu_i++;
+ } else {
+ dev_dbg(&pdev->dev, "status=0x%08X\n", status);
+ }
+
+ napi_schedule(&ether->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t npcm7xx_rx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev = ether->pdev;
+ __le32 status;
+ unsigned long flags;
+ unsigned int any_err = 0;
+ __le32 rxfsm;
+
+ npcm7xx_get_and_clear_int(dev, &status, 0xFFFF);
+ ether->rx_int_count++;
+
+ if (unlikely(status & MISTA_RXBERR)) {
+ ether->rx_berr++;
+ dev_err(&pdev->dev, "emc rx bus error status=0x%08X\n", status);
+#ifdef CONFIG_NPCM7XX_EMC_ETH_DEBUG
+ npcm7xx_info_print(dev);
+#endif
+ spin_lock_irqsave(&ether->lock, flags);
+ writel(0, (ether->reg + REG_MIEN)); /* disable any interrupt */
+ spin_unlock_irqrestore(&ether->lock, flags);
+ ether->need_reset = 1;
+ napi_schedule(&ether->napi);
+ return IRQ_HANDLED;
+ }
+
+ if (unlikely(status & (MISTA_RXOV | MISTA_RDU))) {
+ /*
+ * filter out all received packets until we have
+ * enough available buffer descriptors
+ */
+ writel(0, (ether->reg + REG_CAMCMR));
+ any_err = 1;
+ if (status & (MISTA_RXOV))
+ ether->rxov++;
+ if (status & (MISTA_RDU))
+ ether->rdu++;
+
+ /*
+ * workaround Errata 1.36: EMC Hangs on receiving 253-256
+ * byte packet
+ */
+ rxfsm = readl((ether->reg + REG_RXFSM));
+
+ if ((rxfsm & 0xFFFFF000) == 0x08044000) {
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ rxfsm = readl((ether->reg + REG_RXFSM));
+ if ((rxfsm & 0xFFFFF000) != 0x08044000)
+ break;
+ }
+ if (i == 32) {
+ ether->rx_stuck++;
+ spin_lock_irqsave(&ether->lock, flags);
+#ifdef CONFIG_NPCM7XX_EMC_ETH_DEBUG
+ npcm7xx_info_print(dev);
+#endif
+ writel(0, (ether->reg + REG_MIEN));
+ spin_unlock_irqrestore(&ether->lock, flags);
+ ether->need_reset = 1;
+ napi_schedule(&ether->napi);
+ dev_err(&pdev->dev, "stuck on REG_RXFSM = 0x%08X status=%08X doing reset!\n", rxfsm, status);
+ return IRQ_HANDLED;
+ }
+ }
+ }
+
+	/* echo MISTA status on unexpected flags although we don't do anything with them */
+ if (unlikely(status &
+ (/* MISTA_RXINTR | */ /* Receive - all RX interrupt set this */
+ MISTA_CRCE | /* CRC Error */
+		      /* MISTA_RXOV | */ /* Receive FIFO Overflow - we already handled it */
+ (MISTA_PTLE * !IS_VLAN) | /* Packet Too Long is needed if VLAN is not supported */
+ /* MISTA_RXGD | */ /* Receive Good - this is the common good case */
+ MISTA_ALIE | /* Alignment Error */
+ MISTA_RP | /* Runt Packet */
+ MISTA_MMP | /* More Missed Packet */
+ MISTA_DFOI | /* Maximum Frame Length */
+ /* MISTA_DENI | */ /* DMA Early Notification - every packet get this */
+ /* MISTA_RDU | */ /* Receive Descriptor Unavailable */
+		      /* MISTA_RXBERR | */ /* Receive Bus Error Interrupt - we already handled it */
+ /* MISTA_CFR | */ /* Control Frame Receive - not an error */
+ 0))) {
+ dev_dbg(&pdev->dev, "emc rx MISTA status=0x%08X\n", status);
+ any_err = 1;
+ ether->rx_err++;
+ }
+
+ if (!any_err && ((status & MISTA_RXGD) == 0))
+ dev_err(&pdev->dev, "emc rx MISTA status=0x%08X\n", status);
+
+ spin_lock_irqsave(&ether->lock, flags);
+ writel(readl((ether->reg + REG_MIEN)) & ~ENRXGD,
+ (ether->reg + REG_MIEN));
+ spin_unlock_irqrestore(&ether->lock, flags);
+ napi_schedule(&ether->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int npcm7xx_poll(struct napi_struct *napi, int budget)
+{
+ struct npcm7xx_ether *ether =
+ container_of(napi, struct npcm7xx_ether, napi);
+ struct npcm7xx_rxbd *rxbd;
+ struct net_device *dev = ether->ndev;
+ struct platform_device *pdev = ether->pdev;
+ struct sk_buff *skb, *s;
+ unsigned int length;
+ __le32 status;
+ unsigned long flags;
+ int rx_cnt = 0;
+ int complete = 0;
+	unsigned int rx_offset = (readl((ether->reg + REG_CRXDSA)) -
+				  ether->start_rx_ptr) /
+				 sizeof(struct npcm7xx_rxbd);
+ unsigned int local_count = (rx_offset >= ether->cur_rx) ?
+ rx_offset - ether->cur_rx : rx_offset +
+ RX_QUEUE_LEN - ether->cur_rx;
+
+ if (local_count > ether->max_waiting_rx)
+ ether->max_waiting_rx = local_count;
+
+ if (local_count > (4 * RX_POLL_SIZE))
+		/*
+		 * we are probably in a storm of short packets and we don't
+		 * want to get into RDU since short packets in RDU cause
+		 * many RXOV which may cause EMC halt, so we filter out all
+		 * incoming packets
+		 */
+ writel(0, (ether->reg + REG_CAMCMR));
+
+ if (local_count <= budget)
+ /* we can restore accepting of packets */
+ writel(ether->camcmr, (ether->reg + REG_CAMCMR));
+
+ spin_lock_irqsave(&ether->lock, flags);
+ npcm7xx_clean_tx(dev, false);
+ spin_unlock_irqrestore(&ether->lock, flags);
+
+ rxbd = (ether->rdesc + ether->cur_rx);
+
+ while (rx_cnt < budget) {
+ status = rxbd->sl;
+ if ((status & RX_OWN_DMA) == RX_OWN_DMA) {
+ complete = 1;
+ break;
+ }
+		/* for debug purposes we save the previous value */
+ rxbd->reserved = status;
+ s = ether->rx_skb[ether->cur_rx];
+ length = status & 0xFFFF;
+
+		/*
+		 * If VLAN is not supported, RXDS_PTLE (packet too long) is
+		 * also an error
+		 */
+ if (likely((status & (RXDS_RXGD | RXDS_CRCE | RXDS_ALIE |
+ RXDS_RP | (IS_VLAN ? 0 : RXDS_PTLE))) ==
+ RXDS_RXGD) && likely(length <= MAX_PACKET_SIZE)) {
+ dma_unmap_single(&dev->dev, (dma_addr_t)rxbd->buffer,
+ roundup(MAX_PACKET_SIZE_W_CRC, 4),
+ DMA_FROM_DEVICE);
+
+ skb_put(s, length);
+ s->protocol = eth_type_trans(s, dev);
+ netif_receive_skb(s);
+ ether->stats.rx_packets++;
+ ether->stats.rx_bytes += length;
+ rx_cnt++;
+ ether->rx_count_pool++;
+
+			/* now we allocate a new skb instead of the used one */
+ skb = dev_alloc_skb(roundup(MAX_PACKET_SIZE_W_CRC, 4));
+ if (!skb) {
+ dev_err(&pdev->dev, "get skb buffer error\n");
+ ether->stats.rx_dropped++;
+ goto rx_out;
+ }
+
+			/* Do not uncomment the following skb_reserve():
+			 * the receive buffer starting address must be
+			 * aligned to 4 bytes, and uncommenting it would
+			 * align the buffer to 2 bytes, which is likely to
+			 * halt the RX engine and crash the kernel.
+			 * skb_reserve(skb, NET_IP_ALIGN);
+			 */
+ skb->dev = dev;
+
+ rxbd->buffer = dma_map_single(&dev->dev, skb->data,
+ roundup(MAX_PACKET_SIZE_W_CRC, 4),
+ DMA_FROM_DEVICE);
+ ether->rx_skb[ether->cur_rx] = skb;
+ } else {
+ ether->rx_err_count++;
+ ether->stats.rx_errors++;
+ dev_dbg(&pdev->dev, "rx_errors = %lu status = 0x%08X\n",
+ ether->stats.rx_errors, status);
+
+ if (status & RXDS_RP) {
+ ether->stats.rx_length_errors++;
+ dev_dbg(&pdev->dev, "rx_length_errors = %lu\n",
+ ether->stats.rx_length_errors);
+ } else if (status & RXDS_CRCE) {
+ ether->stats.rx_crc_errors++;
+ dev_dbg(&pdev->dev, "rx_crc_errors = %lu\n",
+ ether->stats.rx_crc_errors);
+ } else if (status & RXDS_ALIE) {
+ ether->stats.rx_frame_errors++;
+ dev_dbg(&pdev->dev, "rx_frame_errors = %lu\n",
+ ether->stats.rx_frame_errors);
+ } else if (((!IS_VLAN) && (status & RXDS_PTLE)) ||
+ length > MAX_PACKET_SIZE) {
+ ether->stats.rx_length_errors++;
+ dev_dbg(&pdev->dev, "rx_length_errors = %lu\n",
+ ether->stats.rx_length_errors);
+ }
+ }
+
+ wmb();
+ rxbd->sl = RX_OWN_DMA;
+ wmb();
+
+ if (++ether->cur_rx >= RX_QUEUE_LEN)
+ ether->cur_rx = 0;
+
+ rxbd = (ether->rdesc + ether->cur_rx);
+ }
+
+ if (complete) {
+ napi_complete(napi);
+
+ if (ether->need_reset) {
+ dev_dbg(&pdev->dev, "Reset\n");
+ npcm7xx_reset_mac(dev, 1);
+ }
+
+ spin_lock_irqsave(&ether->lock, flags);
+ writel(readl((ether->reg + REG_MIEN)) | ENRXGD, (ether->reg +
+ REG_MIEN));
+ spin_unlock_irqrestore(&ether->lock, flags);
+ } else {
+		rx_offset = (readl((ether->reg + REG_CRXDSA)) -
+			     ether->start_rx_ptr) / sizeof(struct npcm7xx_rxbd);
+ local_count = (rx_offset >= ether->cur_rx) ? rx_offset -
+ ether->cur_rx : rx_offset + RX_QUEUE_LEN -
+ ether->cur_rx;
+
+ if (local_count > ether->max_waiting_rx)
+ ether->max_waiting_rx = local_count;
+
+ if (local_count > (3 * RX_POLL_SIZE))
+			/*
+			 * we are probably in a storm of short packets and
+			 * we don't want to get into RDU since short packets in
+			 * RDU cause many RXOV which may cause
+			 * EMC halt, so we filter out all incoming packets
+			 */
+ writel(0, (ether->reg + REG_CAMCMR));
+ if (local_count <= RX_POLL_SIZE)
+ /* we can restore accepting of packets */
+ writel(ether->camcmr, (ether->reg + REG_CAMCMR));
+ }
+rx_out:
+
+ /* trigger RX */
+ writel(ENSTART, (ether->reg + REG_RSDR));
+ return rx_cnt;
+}
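+
+/*
+ * The CAMCMR writes above implement a crude form of back-pressure:
+ * writing 0 makes the MAC recognize (and therefore accept) nothing,
+ * dropping a short-packet storm in hardware before it can exhaust the
+ * descriptor ring, while the saved ether->camcmr value restores the
+ * configured filter once the backlog falls below the poll budget.
+ */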
+
+static int npcm7xx_ether_open(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether;
+ struct platform_device *pdev;
+
+ ether = netdev_priv(dev);
+ pdev = ether->pdev;
+
+ if (ether->use_ncsi) {
+ ether->speed = 100;
+ ether->duplex = DUPLEX_FULL;
+ npcm7xx_opmode(dev, 100, DUPLEX_FULL);
+ }
+ npcm7xx_reset_mac(dev, 0);
+
+ if (request_irq(ether->txirq, npcm7xx_tx_interrupt, 0x0, pdev->name,
+ dev)) {
+ dev_err(&pdev->dev, "register irq tx failed\n");
+ npcm7xx_ether_close(dev);
+ return -EAGAIN;
+ }
+
+ if (request_irq(ether->rxirq, npcm7xx_rx_interrupt, 0x0, pdev->name,
+ dev)) {
+ dev_err(&pdev->dev, "register irq rx failed\n");
+ npcm7xx_ether_close(dev);
+ return -EAGAIN;
+ }
+
+ if (ether->phy_dev)
+ phy_start(ether->phy_dev);
+ else if (ether->use_ncsi)
+ netif_carrier_on(dev);
+
+ netif_start_queue(dev);
+ napi_enable(&ether->napi);
+
+ /* trigger RX */
+ writel(ENSTART, (ether->reg + REG_RSDR));
+
+ /* Start the NCSI device */
+ if (ether->use_ncsi) {
+ int err = ncsi_start_dev(ether->ncsidev);
+
+ if (err) {
+ npcm7xx_ether_close(dev);
+ return err;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s is OPENED\n", dev->name);
+
+ return 0;
+}
+
+static int npcm7xx_ether_ioctl(struct net_device *dev,
+ struct ifreq *ifr, int cmd)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct phy_device *phydev = ether->phy_dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(phydev, ifr, cmd);
+}
+
+static void npcm7xx_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
+}
+
+static int npcm7xx_get_settings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct phy_device *phydev = ether->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+	dev_dbg(&ether->pdev->dev, "npcm7xx_get_settings\n");
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
+}
+
+static int npcm7xx_set_settings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct phy_device *phydev = ether->phy_dev;
+ int ret;
+
+ if (!phydev)
+ return -ENODEV;
+
+	dev_dbg(&ether->pdev->dev, "npcm7xx_set_settings\n");
+ ret = phy_ethtool_ksettings_set(phydev, cmd);
+
+ return ret;
+}
+
+static u32 npcm7xx_get_msglevel(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+ return ether->msg_enable;
+}
+
+static void npcm7xx_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+ ether->msg_enable = level;
+}
+
+static const struct ethtool_ops npcm7xx_ether_ethtool_ops = {
+ .get_link_ksettings = npcm7xx_get_settings,
+ .set_link_ksettings = npcm7xx_set_settings,
+ .get_drvinfo = npcm7xx_get_drvinfo,
+ .get_msglevel = npcm7xx_get_msglevel,
+ .set_msglevel = npcm7xx_set_msglevel,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops npcm7xx_ether_netdev_ops = {
+ .ndo_open = npcm7xx_ether_open,
+ .ndo_stop = npcm7xx_ether_close,
+ .ndo_start_xmit = npcm7xx_ether_start_xmit,
+ .ndo_get_stats = npcm7xx_ether_stats,
+ .ndo_set_rx_mode = npcm7xx_ether_set_rx_mode,
+ .ndo_set_mac_address = npcm7xx_set_mac_address,
+ .ndo_do_ioctl = npcm7xx_ether_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void get_mac_address(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev = ether->pdev;
+ struct device_node *np = ether->pdev->dev.of_node;
+ const u8 *mac_address = NULL;
+
+ mac_address = of_get_mac_address(np);
+
+	if (!IS_ERR_OR_NULL(mac_address))
+		ether_addr_copy(dev->dev_addr, mac_address);
+
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ dev_info(&pdev->dev, "%s: device MAC address : %pM\n",
+ pdev->name, dev->dev_addr);
+ } else {
+ eth_hw_addr_random(dev);
+ dev_info(&pdev->dev, "%s: device MAC address (random generator) %pM\n",
+ dev->name, dev->dev_addr);
+ }
+}
+
+static int npcm7xx_mii_setup(struct net_device *dev)
+{
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+ struct platform_device *pdev;
+ struct phy_device *phydev = NULL;
+ int i, err = 0;
+
+ pdev = ether->pdev;
+
+ ether->mii_bus = mdiobus_alloc();
+ if (!ether->mii_bus) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
+ goto out0;
+ }
+
+ ether->mii_bus->name = "npcm7xx_rmii";
+ ether->mii_bus->read = &npcm7xx_mdio_read;
+ ether->mii_bus->write = &npcm7xx_mdio_write;
+ ether->mii_bus->reset = &npcm7xx_mdio_reset;
+ snprintf(ether->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ ether->pdev->name, ether->pdev->id);
+ dev_dbg(&pdev->dev, "%s ether->mii_bus->id=%s\n", __func__,
+ ether->mii_bus->id);
+ ether->mii_bus->priv = ether;
+ ether->mii_bus->parent = &ether->pdev->dev;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ ether->mii_bus->irq[i] = PHY_POLL;
+
+ /* Enable MDIO Clock */
+ writel(readl((ether->reg + REG_MCMDR)) | MCMDR_ENMDC,
+ (ether->reg + REG_MCMDR));
+
+ if (mdiobus_register(ether->mii_bus)) {
+ dev_err(&pdev->dev, "mdiobus_register() failed\n");
+ goto out2;
+ }
+
+ phydev = phy_find_first(ether->mii_bus);
+ if (!phydev) {
+ dev_err(&pdev->dev, "phy_find_first() failed\n");
+ goto out3;
+ }
+
+ dev_info(&pdev->dev, " name = %s ETH-Phy-Id = 0x%x\n",
+ phydev_name(phydev), phydev->phy_id);
+
+	phydev = phy_connect(dev, phydev_name(phydev),
+			     &adjust_link,
+			     PHY_INTERFACE_MODE_RMII);
+
+	if (IS_ERR(phydev)) {
+		err = PTR_ERR(phydev);
+		dev_err(&pdev->dev, "phy_connect() failed - %d\n", err);
+		goto out3;
+	}
+
+	dev_info(&pdev->dev, " ETH-Phy-Id = 0x%x name = %s\n",
+		 phydev->phy_id, phydev->drv->name);
+
+ linkmode_and(phydev->supported, phydev->supported, PHY_BASIC_FEATURES);
+ linkmode_copy(phydev->advertising, phydev->supported);
+ ether->phy_dev = phydev;
+
+ return 0;
+
+out3:
+ mdiobus_unregister(ether->mii_bus);
+out2:
+ mdiobus_free(ether->mii_bus);
+out0:
+
+ return err;
+}
+
+static const struct of_device_id emc_dt_id[] = {
+ { .compatible = "nuvoton,npcm750-emc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, emc_dt_id);
+
+static void npcm7xx_ncsi_handler(struct ncsi_dev *nd)
+{
+ if (unlikely(nd->state != ncsi_dev_state_functional))
+ return;
+
+ netdev_info(nd->dev, "NCSI interface %s\n",
+ nd->link_up ? "up" : "down");
+}
+
+static int npcm7xx_ether_probe(struct platform_device *pdev)
+{
+ struct npcm7xx_ether *ether;
+ struct net_device *dev;
+ int error;
+
+ struct clk *emc_clk = NULL;
+ struct device_node *np = pdev->dev.of_node;
+
+ pdev->id = of_alias_get_id(np, "ethernet");
+ if (pdev->id < 0)
+ pdev->id = 0;
+
+ emc_clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (IS_ERR(emc_clk))
+ return PTR_ERR(emc_clk);
+
+ /* Enable Clock */
+ clk_prepare_enable(emc_clk);
+
+ error = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (error)
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct npcm7xx_ether));
+ if (!dev)
+ return -ENOMEM;
+
+ ether = netdev_priv(dev);
+
+ ether->rst_regmap =
+ syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
+	if (IS_ERR(ether->rst_regmap)) {
+		dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-rst\n", __func__);
+		error = PTR_ERR(ether->rst_regmap);
+		goto failed_free;
+	}
+
+ /* Reset EMC module */
+ if (pdev->id == 0) {
+ regmap_update_bits(ether->rst_regmap, IPSRST1_OFFSET,
+ (0x1 << 6), (0x1 << 6));
+ regmap_update_bits(ether->rst_regmap, IPSRST1_OFFSET,
+ (0x1 << 6), 0);
+ }
+ if (pdev->id == 1) {
+ regmap_update_bits(ether->rst_regmap, IPSRST1_OFFSET,
+ (0x1 << 21), (0x1 << 21));
+ regmap_update_bits(ether->rst_regmap, IPSRST1_OFFSET,
+ (0x1 << 21), 0);
+ }
+
+ ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ether->res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ error = -ENXIO;
+ goto failed_free;
+ }
+
+ if (!request_mem_region(ether->res->start,
+ resource_size(ether->res), pdev->name)) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ error = -EBUSY;
+ goto failed_free;
+ }
+
+ ether->reg = ioremap(ether->res->start, resource_size(ether->res));
+	dev_dbg(&pdev->dev, "%s ether->reg = %p\n", __func__, ether->reg);
+
+ if (!ether->reg) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ error = -ENXIO;
+ goto failed_free_mem;
+ }
+
+ ether->txirq = platform_get_irq(pdev, 0);
+ if (ether->txirq < 0) {
+ dev_err(&pdev->dev, "failed to get ether tx irq\n");
+ error = -ENXIO;
+ goto failed_free_io;
+ }
+
+ ether->rxirq = platform_get_irq(pdev, 1);
+ if (ether->rxirq < 0) {
+ dev_err(&pdev->dev, "failed to get ether rx irq\n");
+ error = -ENXIO;
+ goto failed_free_io;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ ether->ndev = dev;
+
+ ether->pdev = pdev;
+ ether->msg_enable = NETIF_MSG_LINK;
+
+ dev->netdev_ops = &npcm7xx_ether_netdev_ops;
+ dev->ethtool_ops = &npcm7xx_ether_ethtool_ops;
+
+ dev->tx_queue_len = TX_QUEUE_LEN;
+ dev->dma = 0x0;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ get_mac_address(dev);
+
+ ether->cur_tx = 0x0;
+ ether->cur_rx = 0x0;
+ ether->finish_tx = 0x0;
+ ether->pending_tx = 0x0;
+ ether->link = 0;
+ ether->speed = 100;
+ ether->duplex = DUPLEX_FULL;
+ ether->need_reset = 0;
+ ether->dump_buf = NULL;
+ ether->rx_berr = 0;
+ ether->rx_err = 0;
+ ether->rdu = 0;
+ ether->rxov = 0;
+ ether->rx_stuck = 0;
+ /* debug counters */
+ ether->max_waiting_rx = 0;
+ ether->rx_count_pool = 0;
+ ether->count_xmit = 0;
+ ether->rx_int_count = 0;
+ ether->rx_err_count = 0;
+ ether->tx_int_count = 0;
+ ether->count_finish = 0;
+ ether->tx_tdu = 0;
+ ether->tx_tdu_i = 0;
+ ether->tx_cp_i = 0;
+
+ spin_lock_init(&ether->lock);
+
+ netif_napi_add(dev, &ether->napi, npcm7xx_poll, RX_POLL_SIZE);
+
+ if (pdev->dev.of_node &&
+ of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
+ if (!IS_ENABLED(CONFIG_NET_NCSI)) {
+ dev_err(&pdev->dev, "CONFIG_NET_NCSI not enabled\n");
+ error = -ENODEV;
+ goto failed_free_napi;
+ }
+ dev_info(&pdev->dev, "Using NCSI interface\n");
+ ether->use_ncsi = true;
+ ether->ncsidev = ncsi_register_dev(dev, npcm7xx_ncsi_handler);
+ if (!ether->ncsidev) {
+ error = -ENODEV;
+ goto failed_free_napi;
+ }
+ } else {
+ ether->use_ncsi = false;
+ error = npcm7xx_mii_setup(dev);
+ if (error < 0) {
+ dev_err(&pdev->dev, "npcm7xx_mii_setup err\n");
+ goto failed_free_napi;
+ }
+ }
+
+ error = register_netdev(dev);
+ if (error != 0) {
+ dev_err(&pdev->dev, "register_netdev() failed\n");
+ error = -ENODEV;
+ goto failed_free_napi;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ npcm7xx_debug_fs(ether);
+#endif
+
+ return 0;
+
+failed_free_napi:
+ netif_napi_del(&ether->napi);
+ platform_set_drvdata(pdev, NULL);
+failed_free_io:
+ iounmap(ether->reg);
+failed_free_mem:
+ release_mem_region(ether->res->start, resource_size(ether->res));
+failed_free:
+ free_netdev(dev);
+
+ return error;
+}
+
+static int npcm7xx_ether_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct npcm7xx_ether *ether = netdev_priv(dev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ether->dbgfs_dir);
+#endif
+
+ unregister_netdev(dev);
+
+ free_irq(ether->txirq, dev);
+ free_irq(ether->rxirq, dev);
+
+ if (ether->phy_dev)
+ phy_disconnect(ether->phy_dev);
+
+ mdiobus_unregister(ether->mii_bus);
+ mdiobus_free(ether->mii_bus);
+
+ platform_set_drvdata(pdev, NULL);
+
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver npcm7xx_ether_driver = {
+ .probe = npcm7xx_ether_probe,
+ .remove = npcm7xx_ether_remove,
+ .driver = {
+ .name = DRV_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(emc_dt_id),
+ },
+};
+
+module_platform_driver(npcm7xx_ether_driver);
+
+MODULE_AUTHOR("Nuvoton Technology Corp.");
+MODULE_DESCRIPTION("NPCM750 EMC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:npcm750-emc");
+MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/peci/Kconfig b/drivers/peci/Kconfig
new file mode 100644
index 000000000000..a64fed7bb367
--- /dev/null
+++ b/drivers/peci/Kconfig
@@ -0,0 +1,37 @@
+#
+# Platform Environment Control Interface (PECI) subsystem configuration
+#
+
+menu "PECI support"
+
+config PECI
+ tristate "PECI support"
+ select CRC8
+ help
+ The Platform Environment Control Interface (PECI) is a one-wire bus
+ interface that provides a communication channel from Intel processors
+ and chipset components to external monitoring or control devices.
+
+ If you want PECI support, you should say Y here and also to the
+ specific driver for your bus adapter(s) below.
+
+ This support is also available as a module. If so, the module
+ will be called peci-core.
+
+if PECI
+
+config PECI_CHARDEV
+ tristate "PECI device interface"
+ help
+ Say Y here to use peci-* device files, usually found in the /dev
+ directory on your system. They make it possible to have user-space
+ programs use the PECI bus.
+
+ This support is also available as a module. If so, the module
+ will be called peci-dev.
+
+source "drivers/peci/busses/Kconfig"
+
+endif # PECI
+
+endmenu
diff --git a/drivers/peci/Makefile b/drivers/peci/Makefile
new file mode 100644
index 000000000000..da8b0a33fa42
--- /dev/null
+++ b/drivers/peci/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PECI core drivers.
+#
+
+# Core functionality
+obj-$(CONFIG_PECI) += peci-core.o
+obj-$(CONFIG_PECI_CHARDEV) += peci-dev.o
+
+# Hardware specific bus drivers
+obj-y += busses/
diff --git a/drivers/peci/busses/Kconfig b/drivers/peci/busses/Kconfig
new file mode 100644
index 000000000000..4316234db67c
--- /dev/null
+++ b/drivers/peci/busses/Kconfig
@@ -0,0 +1,34 @@
+#
+# PECI hardware bus configuration
+#
+
+menu "PECI Hardware Bus support"
+
+config PECI_ASPEED
+ tristate "ASPEED PECI support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF
+ depends on HAS_IOMEM
+ depends on PECI
+ help
+ Say Y here if you want support for the Platform Environment Control
+ Interface (PECI) bus adapter driver on the ASPEED SoCs.
+
+ This support is also available as a module. If so, the module
+ will be called peci-aspeed.
+
+config PECI_NPCM
+ tristate "Nuvoton NPCM PECI support"
+ select REGMAP_MMIO
+ depends on OF
+ depends on HAS_IOMEM
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on PECI
+ help
+ Say Y here if you want support for the Platform Environment Control
+ Interface (PECI) bus adapter driver on the Nuvoton NPCM SoCs.
+
+ This support is also available as a module. If so, the module
+ will be called peci-npcm.
+
+endmenu
diff --git a/drivers/peci/busses/Makefile b/drivers/peci/busses/Makefile
new file mode 100644
index 000000000000..aa8ce3ae5947
--- /dev/null
+++ b/drivers/peci/busses/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PECI hardware bus drivers.
+#
+
+obj-$(CONFIG_PECI_ASPEED) += peci-aspeed.o
+obj-$(CONFIG_PECI_NPCM) += peci-npcm.o
diff --git a/drivers/peci/busses/peci-aspeed.c b/drivers/peci/busses/peci-aspeed.c
new file mode 100644
index 000000000000..2673d4c4dcf9
--- /dev/null
+++ b/drivers/peci/busses/peci-aspeed.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012-2017 ASPEED Technology Inc.
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/peci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* ASPEED PECI Registers */
+/* Control Register */
+#define ASPEED_PECI_CTRL 0x00
+#define ASPEED_PECI_CTRL_SAMPLING_MASK GENMASK(19, 16)
+#define ASPEED_PECI_CTRL_READ_MODE_MASK GENMASK(13, 12)
+#define ASPEED_PECI_CTRL_READ_MODE_COUNT BIT(12)
+#define ASPEED_PECI_CTRL_READ_MODE_DBG BIT(13)
+#define ASPEED_PECI_CTRL_CLK_SOURCE_MASK BIT(11)
+#define ASPEED_PECI_CTRL_CLK_DIV_MASK GENMASK(10, 8)
+#define ASPEED_PECI_CTRL_INVERT_OUT BIT(7)
+#define ASPEED_PECI_CTRL_INVERT_IN BIT(6)
+#define ASPEED_PECI_CTRL_BUS_CONTENT_EN BIT(5)
+#define ASPEED_PECI_CTRL_PECI_EN BIT(4)
+#define ASPEED_PECI_CTRL_PECI_CLK_EN BIT(0)
+
+/* Timing Negotiation Register */
+#define ASPEED_PECI_TIMING_NEGOTIATION 0x04
+#define ASPEED_PECI_TIMING_MESSAGE_MASK GENMASK(15, 8)
+#define ASPEED_PECI_TIMING_ADDRESS_MASK GENMASK(7, 0)
+
+/* Command Register */
+#define ASPEED_PECI_CMD 0x08
+#define ASPEED_PECI_CMD_PIN_MON BIT(31)
+#define ASPEED_PECI_CMD_STS_MASK GENMASK(27, 24)
+#define ASPEED_PECI_CMD_IDLE_MASK \
+ (ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MON)
+#define ASPEED_PECI_CMD_FIRE BIT(0)
+
+/* Read/Write Length Register */
+#define ASPEED_PECI_RW_LENGTH 0x0c
+#define ASPEED_PECI_AW_FCS_EN BIT(31)
+#define ASPEED_PECI_READ_LEN_MASK GENMASK(23, 16)
+#define ASPEED_PECI_WRITE_LEN_MASK GENMASK(15, 8)
+#define ASPEED_PECI_TAGET_ADDR_MASK GENMASK(7, 0)
+
+/* Expected FCS Data Register */
+#define ASPEED_PECI_EXP_FCS 0x10
+#define ASPEED_PECI_EXP_READ_FCS_MASK GENMASK(23, 16)
+#define ASPEED_PECI_EXP_AW_FCS_AUTO_MASK GENMASK(15, 8)
+#define ASPEED_PECI_EXP_WRITE_FCS_MASK GENMASK(7, 0)
+
+/* Captured FCS Data Register */
+#define ASPEED_PECI_CAP_FCS 0x14
+#define ASPEED_PECI_CAP_READ_FCS_MASK GENMASK(23, 16)
+#define ASPEED_PECI_CAP_WRITE_FCS_MASK GENMASK(7, 0)
+
+/* Interrupt Register */
+#define ASPEED_PECI_INT_CTRL 0x18
+#define ASPEED_PECI_TIMING_NEGO_SEL_MASK GENMASK(31, 30)
+#define ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO 0
+#define ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO 1
+#define ASPEED_PECI_MESSAGE_NEGO 2
+#define ASPEED_PECI_INT_MASK GENMASK(4, 0)
+#define ASPEED_PECI_INT_BUS_TIMEOUT BIT(4)
+#define ASPEED_PECI_INT_BUS_CONNECT BIT(3)
+#define ASPEED_PECI_INT_W_FCS_BAD BIT(2)
+#define ASPEED_PECI_INT_W_FCS_ABORT BIT(1)
+#define ASPEED_PECI_INT_CMD_DONE BIT(0)
+
+/* Interrupt Status Register */
+#define ASPEED_PECI_INT_STS 0x1c
+#define ASPEED_PECI_INT_TIMING_RESULT_MASK GENMASK(29, 16)
+	/* bits[4..0]: same bit fields as in the 'Interrupt Register' */
+
+/* Rx/Tx Data Buffer Registers */
+#define ASPEED_PECI_W_DATA0 0x20
+#define ASPEED_PECI_W_DATA1 0x24
+#define ASPEED_PECI_W_DATA2 0x28
+#define ASPEED_PECI_W_DATA3 0x2c
+#define ASPEED_PECI_R_DATA0 0x30
+#define ASPEED_PECI_R_DATA1 0x34
+#define ASPEED_PECI_R_DATA2 0x38
+#define ASPEED_PECI_R_DATA3 0x3c
+#define ASPEED_PECI_W_DATA4 0x40
+#define ASPEED_PECI_W_DATA5 0x44
+#define ASPEED_PECI_W_DATA6 0x48
+#define ASPEED_PECI_W_DATA7 0x4c
+#define ASPEED_PECI_R_DATA4 0x50
+#define ASPEED_PECI_R_DATA5 0x54
+#define ASPEED_PECI_R_DATA6 0x58
+#define ASPEED_PECI_R_DATA7 0x5c
+#define ASPEED_PECI_DATA_BUF_SIZE_MAX 32
+
+/* Timing Negotiation */
+#define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT 8
+#define ASPEED_PECI_RD_SAMPLING_POINT_MAX 15
+#define ASPEED_PECI_CLK_DIV_DEFAULT 0
+#define ASPEED_PECI_CLK_DIV_MAX 7
+#define ASPEED_PECI_MSG_TIMING_DEFAULT 1
+#define ASPEED_PECI_MSG_TIMING_MAX 255
+#define ASPEED_PECI_ADDR_TIMING_DEFAULT 1
+#define ASPEED_PECI_ADDR_TIMING_MAX 255
+
+/* Timeout */
+#define ASPEED_PECI_IDLE_CHECK_TIMEOUT_USEC 50000
+#define ASPEED_PECI_IDLE_CHECK_INTERVAL_USEC 10000
+#define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
+#define ASPEED_PECI_CMD_TIMEOUT_MS_MAX 60000
+
+struct aspeed_peci {
+ struct peci_adapter *adapter;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ int irq;
+ spinlock_t lock; /* to sync completion status handling */
+ struct completion xfer_complete;
+ u32 status;
+ u32 cmd_timeout_ms;
+};
+
+static inline int aspeed_peci_check_idle(struct aspeed_peci *priv)
+{
+ u32 cmd_sts;
+
+ return readl_poll_timeout(priv->base + ASPEED_PECI_CMD,
+ cmd_sts,
+ !(cmd_sts & ASPEED_PECI_CMD_IDLE_MASK),
+ ASPEED_PECI_IDLE_CHECK_INTERVAL_USEC,
+ ASPEED_PECI_IDLE_CHECK_TIMEOUT_USEC);
+}
+
+static int aspeed_peci_xfer(struct peci_adapter *adapter,
+ struct peci_xfer_msg *msg)
+{
+ struct aspeed_peci *priv = peci_get_adapdata(adapter);
+ long err, timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
+ u32 peci_head, peci_state, rx_data = 0;
+ ulong flags;
+ int i, ret;
+ uint reg;
+
+ if (msg->tx_len > ASPEED_PECI_DATA_BUF_SIZE_MAX ||
+ msg->rx_len > ASPEED_PECI_DATA_BUF_SIZE_MAX)
+ return -EINVAL;
+
+ /* Check command sts and bus idle state */
+ ret = aspeed_peci_check_idle(priv);
+ if (ret)
+ return ret; /* -ETIMEDOUT */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ reinit_completion(&priv->xfer_complete);
+
+ peci_head = FIELD_PREP(ASPEED_PECI_TAGET_ADDR_MASK, msg->addr) |
+ FIELD_PREP(ASPEED_PECI_WRITE_LEN_MASK, msg->tx_len) |
+ FIELD_PREP(ASPEED_PECI_READ_LEN_MASK, msg->rx_len);
+
+ writel(peci_head, priv->base + ASPEED_PECI_RW_LENGTH);
+
+ for (i = 0; i < msg->tx_len; i += 4) {
+ reg = i < 16 ? ASPEED_PECI_W_DATA0 + i % 16 :
+ ASPEED_PECI_W_DATA4 + i % 16;
+ writel(le32_to_cpup((__le32 *)&msg->tx_buf[i]),
+ priv->base + reg);
+ }
+
+ dev_dbg(priv->dev, "HEAD : 0x%08x\n", peci_head);
+ print_hex_dump_debug("TX : ", DUMP_PREFIX_NONE, 16, 1,
+ msg->tx_buf, msg->tx_len, true);
+
+ priv->status = 0;
+ writel(ASPEED_PECI_CMD_FIRE, priv->base + ASPEED_PECI_CMD);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ err = wait_for_completion_interruptible_timeout(&priv->xfer_complete,
+ timeout);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ dev_dbg(priv->dev, "INT_STS : 0x%08x\n", priv->status);
+ peci_state = readl(priv->base + ASPEED_PECI_CMD);
+ dev_dbg(priv->dev, "PECI_STATE : 0x%lx\n",
+ FIELD_GET(ASPEED_PECI_CMD_STS_MASK, peci_state));
+
+ writel(0, priv->base + ASPEED_PECI_CMD);
+
+ if (err <= 0 || priv->status != ASPEED_PECI_INT_CMD_DONE) {
+ if (err < 0) { /* -ERESTARTSYS */
+ ret = (int)err;
+ goto err_irqrestore;
+ } else if (err == 0) {
+ dev_dbg(priv->dev, "Timeout waiting for a response!\n");
+ ret = -ETIMEDOUT;
+ goto err_irqrestore;
+ }
+
+ dev_dbg(priv->dev, "No valid response!\n");
+ ret = -EIO;
+ goto err_irqrestore;
+ }
+
+	/*
+	 * Note that rx_len and the rx_buf size can be odd numbers, so
+	 * byte-wise handling is more efficient here.
+	 */
+ for (i = 0; i < msg->rx_len; i++) {
+ u8 byte_offset = i % 4;
+
+ if (byte_offset == 0) {
+ reg = i < 16 ? ASPEED_PECI_R_DATA0 + i % 16 :
+ ASPEED_PECI_R_DATA4 + i % 16;
+ rx_data = readl(priv->base + reg);
+ }
+
+ msg->rx_buf[i] = (u8)(rx_data >> (byte_offset << 3));
+ }
+
+ print_hex_dump_debug("RX : ", DUMP_PREFIX_NONE, 16, 1,
+ msg->rx_buf, msg->rx_len, true);
+
+ peci_state = readl(priv->base + ASPEED_PECI_CMD);
+ dev_dbg(priv->dev, "PECI_STATE : 0x%lx\n",
+ FIELD_GET(ASPEED_PECI_CMD_STS_MASK, peci_state));
+ dev_dbg(priv->dev, "------------------------\n");
+
+err_irqrestore:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
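+
+/*
+ * A note on the buffer indexing used in both loops above: the 32-byte
+ * Tx/Rx buffers are split across two register banks, W/R_DATA0..3 at
+ * 0x20..0x3c for bytes 0-15 and W/R_DATA4..7 at 0x40..0x5c for bytes
+ * 16-31, hence the "i < 16" bank selector with an "i % 16" offset. For
+ * example, byte 20 of the Rx buffer is bits 7:0 of ASPEED_PECI_R_DATA5.
+ */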
+
+static irqreturn_t aspeed_peci_irq_handler(int irq, void *arg)
+{
+ struct aspeed_peci *priv = arg;
+ u32 status;
+
+ spin_lock(&priv->lock);
+ status = readl(priv->base + ASPEED_PECI_INT_STS);
+ writel(status, priv->base + ASPEED_PECI_INT_STS);
+ priv->status |= (status & ASPEED_PECI_INT_MASK);
+
+	/*
+	 * In most cases interrupt bits are set one at a time, but note
+	 * that multiple interrupt bits could be set at the same time.
+	 */
+ if (status & ASPEED_PECI_INT_BUS_TIMEOUT)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_BUS_TIMEOUT\n");
+
+ if (status & ASPEED_PECI_INT_BUS_CONNECT)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_BUS_CONNECT\n");
+
+ if (status & ASPEED_PECI_INT_W_FCS_BAD)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_W_FCS_BAD\n");
+
+ if (status & ASPEED_PECI_INT_W_FCS_ABORT)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_W_FCS_ABORT\n");
+
+	/*
+	 * All commands should end with the ASPEED_PECI_INT_CMD_DONE bit
+	 * set, even in an error case.
+	 */
+ if (status & ASPEED_PECI_INT_CMD_DONE) {
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_CMD_DONE\n");
+ complete(&priv->xfer_complete);
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int aspeed_peci_init_ctrl(struct aspeed_peci *priv)
+{
+ u32 msg_timing, addr_timing, rd_sampling_point;
+ u32 clk_freq, clk_divisor, clk_div_val = 0;
+ int ret;
+
+ priv->clk = devm_clk_get(priv->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(priv->dev, "Failed to get clk source.\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable clock.\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(priv->dev, "clock-frequency", &clk_freq);
+ if (ret) {
+ dev_err(priv->dev,
+ "Could not read clock-frequency property.\n");
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ clk_divisor = clk_get_rate(priv->clk) / clk_freq;
+
+	while ((clk_divisor >>= 1) && (clk_div_val < ASPEED_PECI_CLK_DIV_MAX))
+		clk_div_val++;
+
+ ret = device_property_read_u32(priv->dev, "msg-timing", &msg_timing);
+ if (ret || msg_timing > ASPEED_PECI_MSG_TIMING_MAX) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid msg-timing : %u, Use default : %u\n",
+ msg_timing, ASPEED_PECI_MSG_TIMING_DEFAULT);
+ msg_timing = ASPEED_PECI_MSG_TIMING_DEFAULT;
+ }
+
+ ret = device_property_read_u32(priv->dev, "addr-timing", &addr_timing);
+ if (ret || addr_timing > ASPEED_PECI_ADDR_TIMING_MAX) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid addr-timing : %u, Use default : %u\n",
+ addr_timing, ASPEED_PECI_ADDR_TIMING_DEFAULT);
+ addr_timing = ASPEED_PECI_ADDR_TIMING_DEFAULT;
+ }
+
+ ret = device_property_read_u32(priv->dev, "rd-sampling-point",
+ &rd_sampling_point);
+ if (ret || rd_sampling_point > ASPEED_PECI_RD_SAMPLING_POINT_MAX) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid rd-sampling-point : %u. Use default : %u\n",
+ rd_sampling_point,
+ ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT);
+ rd_sampling_point = ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT;
+ }
+
+ ret = device_property_read_u32(priv->dev, "cmd-timeout-ms",
+ &priv->cmd_timeout_ms);
+ if (ret || priv->cmd_timeout_ms > ASPEED_PECI_CMD_TIMEOUT_MS_MAX ||
+ priv->cmd_timeout_ms == 0) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid cmd-timeout-ms : %u. Use default : %u\n",
+ priv->cmd_timeout_ms,
+ ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT);
+ priv->cmd_timeout_ms = ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT;
+ }
+
+ writel(FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK,
+ ASPEED_PECI_CLK_DIV_DEFAULT) |
+ ASPEED_PECI_CTRL_PECI_CLK_EN, priv->base + ASPEED_PECI_CTRL);
+
+ /*
+ * Timing negotiation period setting.
+ * The unit of the programmed value is 4 times of PECI clock period.
+ */
+ writel(FIELD_PREP(ASPEED_PECI_TIMING_MESSAGE_MASK, msg_timing) |
+ FIELD_PREP(ASPEED_PECI_TIMING_ADDRESS_MASK, addr_timing),
+ priv->base + ASPEED_PECI_TIMING_NEGOTIATION);
+
+ /* Clear interrupts */
+ writel(readl(priv->base + ASPEED_PECI_INT_STS) | ASPEED_PECI_INT_MASK,
+ priv->base + ASPEED_PECI_INT_STS);
+
+ /* Set timing negotiation mode and enable interrupts */
+ writel(FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK,
+ ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO) |
+ ASPEED_PECI_INT_MASK, priv->base + ASPEED_PECI_INT_CTRL);
+
+ /* Read sampling point and clock speed setting */
+ writel(FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK, rd_sampling_point) |
+ FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, clk_div_val) |
+ ASPEED_PECI_CTRL_PECI_EN | ASPEED_PECI_CTRL_PECI_CLK_EN,
+ priv->base + ASPEED_PECI_CTRL);
+
+ return 0;
+}
+
+static int aspeed_peci_probe(struct platform_device *pdev)
+{
+ struct peci_adapter *adapter;
+ struct aspeed_peci *priv;
+ int ret;
+
+ adapter = peci_alloc_adapter(&pdev->dev, sizeof(*priv));
+ if (!adapter)
+ return -ENOMEM;
+
+ priv = peci_get_adapdata(adapter);
+ priv->adapter = adapter;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto err_put_adapter_dev;
+ }
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		ret = priv->irq;
+		goto err_put_adapter_dev;
+	}
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
+ 0, "peci-aspeed-irq", priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ init_completion(&priv->xfer_complete);
+ spin_lock_init(&priv->lock);
+
+ priv->adapter->owner = THIS_MODULE;
+ priv->adapter->dev.of_node = of_node_get(dev_of_node(priv->dev));
+ strlcpy(priv->adapter->name, pdev->name, sizeof(priv->adapter->name));
+ priv->adapter->xfer = aspeed_peci_xfer;
+ priv->adapter->use_dma = false;
+
+ priv->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(&pdev->dev,
+ "missing or invalid reset controller entry\n");
+ ret = PTR_ERR(priv->rst);
+ goto err_put_adapter_dev;
+ }
+ reset_control_deassert(priv->rst);
+
+ ret = aspeed_peci_init_ctrl(priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ ret = peci_add_adapter(priv->adapter);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ dev_info(&pdev->dev, "peci bus %d registered, irq %d\n",
+ priv->adapter->nr, priv->irq);
+
+ return 0;
+
+err_put_adapter_dev:
+ put_device(&adapter->dev);
+
+ return ret;
+}
+
+static int aspeed_peci_remove(struct platform_device *pdev)
+{
+ struct aspeed_peci *priv = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+ reset_control_assert(priv->rst);
+ peci_del_adapter(priv->adapter);
+ of_node_put(priv->adapter->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_peci_of_table[] = {
+ { .compatible = "aspeed,ast2400-peci", },
+ { .compatible = "aspeed,ast2500-peci", },
+ { .compatible = "aspeed,ast2600-peci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_peci_of_table);
+
+static struct platform_driver aspeed_peci_driver = {
+ .probe = aspeed_peci_probe,
+ .remove = aspeed_peci_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(aspeed_peci_of_table),
+ },
+};
+module_platform_driver(aspeed_peci_driver);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("ASPEED PECI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/peci/busses/peci-npcm.c b/drivers/peci/busses/peci-npcm.c
new file mode 100644
index 000000000000..bdebbf1ec7f1
--- /dev/null
+++ b/drivers/peci/busses/peci-npcm.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/peci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+
+/* NPCM7xx GCR module */
+#define NPCM7XX_INTCR3_OFFSET 0x9C
+#define NPCM7XX_INTCR3_PECIVSEL BIT(19)
+
+/* NPCM PECI Registers */
+#define NPCM_PECI_CTL_STS 0x00
+#define NPCM_PECI_RD_LENGTH 0x04
+#define NPCM_PECI_ADDR 0x08
+#define NPCM_PECI_CMD 0x0C
+#define NPCM_PECI_CTL2 0x10
+#define NPCM_PECI_WR_LENGTH 0x1C
+#define NPCM_PECI_PDDR 0x2C
+#define NPCM_PECI_DAT_INOUT(n) (0x100 + ((n) * 4))
+
+#define NPCM_PECI_MAX_REG 0x200
+
+/* NPCM_PECI_CTL_STS - 0x00 : Control Register */
+#define NPCM_PECI_CTRL_DONE_INT_EN BIT(6)
+#define NPCM_PECI_CTRL_ABRT_ERR BIT(4)
+#define NPCM_PECI_CTRL_CRC_ERR BIT(3)
+#define NPCM_PECI_CTRL_DONE BIT(1)
+#define NPCM_PECI_CTRL_START_BUSY BIT(0)
+
+/* NPCM_PECI_RD_LENGTH - 0x04 : Read Length Register */
+#define NPCM_PECI_RD_LEN_MASK GENMASK(6, 0)
+
+/* NPCM_PECI_CTL2 - 0x10 : Control Register 2 */
+#define NPCM_PECI_CTL2_MASK GENMASK(7, 6)
+
+/* NPCM_PECI_WR_LENGTH - 0x1C : Write Length Register */
+#define NPCM_PECI_WR_LEN_MASK GENMASK(6, 0)
+
+/* NPCM_PECI_PDDR - 0x2C : Host negotiation bit rate register */
+#define NPCM_PECI_PDDR_MASK GENMASK(4, 0)
+
+#define NPCM_PECI_INT_MASK \
+ (NPCM_PECI_CTRL_ABRT_ERR | NPCM_PECI_CTRL_CRC_ERR | NPCM_PECI_CTRL_DONE)
+
+#define NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC 50000
+#define NPCM_PECI_IDLE_CHECK_INTERVAL_USEC 10000
+#define NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
+#define NPCM_PECI_CMD_TIMEOUT_MS_MAX 60000
+#define NPCM_PECI_HOST_NEG_BIT_RATE_MAX 31
+#define NPCM_PECI_HOST_NEG_BIT_RATE_MIN 7
+#define NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT 15
+#define NPCM_PECI_PULL_DOWN_DEFAULT 0
+#define NPCM_PECI_PULL_DOWN_MAX 2
+
+struct npcm_peci {
+ u32 cmd_timeout_ms;
+ u32 host_bit_rate;
+ struct completion xfer_complete;
+ struct regmap *gcr_regmap;
+ struct peci_adapter *adapter;
+ struct regmap *regmap;
+ u32 status;
+ spinlock_t lock; /* to sync completion status handling */
+ struct device *dev;
+ struct clk *clk;
+ int irq;
+};
+
+static int npcm_peci_xfer_native(struct npcm_peci *priv,
+ struct peci_xfer_msg *msg)
+{
+ long err, timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
+ unsigned long flags;
+ unsigned int msg_rd;
+ u32 cmd_sts;
+ int i, rc;
+
+	/* Check command status and bus idle state */
+ rc = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts,
+ !(cmd_sts & NPCM_PECI_CTRL_START_BUSY),
+ NPCM_PECI_IDLE_CHECK_INTERVAL_USEC,
+ NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC);
+ if (rc)
+ return rc; /* -ETIMEDOUT */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ reinit_completion(&priv->xfer_complete);
+
+ regmap_write(priv->regmap, NPCM_PECI_ADDR, msg->addr);
+	regmap_write(priv->regmap, NPCM_PECI_RD_LENGTH,
+		     NPCM_PECI_RD_LEN_MASK & msg->rx_len);
+ regmap_write(priv->regmap, NPCM_PECI_WR_LENGTH,
+ NPCM_PECI_WR_LEN_MASK & msg->tx_len);
+
+ if (msg->tx_len) {
+ regmap_write(priv->regmap, NPCM_PECI_CMD, msg->tx_buf[0]);
+
+ for (i = 0; i < (msg->tx_len - 1); i++)
+ regmap_write(priv->regmap, NPCM_PECI_DAT_INOUT(i),
+ msg->tx_buf[i + 1]);
+ }
+
+ priv->status = 0;
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS,
+ NPCM_PECI_CTRL_START_BUSY,
+ NPCM_PECI_CTRL_START_BUSY);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ err = wait_for_completion_interruptible_timeout(&priv->xfer_complete,
+ timeout);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ regmap_write(priv->regmap, NPCM_PECI_CMD, 0);
+
+ if (err <= 0 || priv->status != NPCM_PECI_CTRL_DONE) {
+ if (err < 0) { /* -ERESTARTSYS */
+ rc = (int)err;
+ goto err_irqrestore;
+ } else if (err == 0) {
+ dev_dbg(priv->dev, "Timeout waiting for a response!\n");
+ rc = -ETIMEDOUT;
+ goto err_irqrestore;
+ }
+
+ dev_dbg(priv->dev, "No valid response!\n");
+ rc = -EIO;
+ goto err_irqrestore;
+ }
+
+ for (i = 0; i < msg->rx_len; i++) {
+ regmap_read(priv->regmap, NPCM_PECI_DAT_INOUT(i), &msg_rd);
+ msg->rx_buf[i] = (u8)msg_rd;
+ }
+
+err_irqrestore:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return rc;
+}
+
+static irqreturn_t npcm_peci_irq_handler(int irq, void *arg)
+{
+ struct npcm_peci *priv = arg;
+ u32 status_ack = 0;
+ u32 status;
+
+ spin_lock(&priv->lock);
+ regmap_read(priv->regmap, NPCM_PECI_CTL_STS, &status);
+ priv->status |= (status & NPCM_PECI_INT_MASK);
+
+ if (status & NPCM_PECI_CTRL_CRC_ERR) {
+ dev_dbg(priv->dev, "PECI_INT_W_FCS_BAD\n");
+ status_ack |= NPCM_PECI_CTRL_CRC_ERR;
+ }
+
+ if (status & NPCM_PECI_CTRL_ABRT_ERR) {
+ dev_dbg(priv->dev, "NPCM_PECI_CTRL_ABRT_ERR\n");
+ status_ack |= NPCM_PECI_CTRL_ABRT_ERR;
+ }
+
+	/*
+	 * All commands should end with the NPCM_PECI_CTRL_DONE bit set,
+	 * even in an error case.
+	 */
+ if (status & NPCM_PECI_CTRL_DONE) {
+ dev_dbg(priv->dev, "NPCM_PECI_CTRL_DONE\n");
+ status_ack |= NPCM_PECI_CTRL_DONE;
+ complete(&priv->xfer_complete);
+ }
+
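+	/* Ack only the status bits handled above (assumed write-1-to-clear) */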
+ regmap_write_bits(priv->regmap, NPCM_PECI_CTL_STS,
+ NPCM_PECI_INT_MASK, status_ack);
+
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
+static int npcm_peci_init_ctrl(struct npcm_peci *priv)
+{
+ u32 cmd_sts, host_neg_bit_rate = 0, pull_down = 0;
+ int ret;
+
+ priv->clk = devm_clk_get(priv->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(priv->dev, "Failed to get clk source.\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable clock.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(priv->dev->of_node, "cmd-timeout-ms",
+ &priv->cmd_timeout_ms);
+ if (ret || priv->cmd_timeout_ms > NPCM_PECI_CMD_TIMEOUT_MS_MAX ||
+ priv->cmd_timeout_ms == 0) {
+		if (ret)
+			dev_warn(priv->dev,
+				 "cmd-timeout-ms not found, using default: %u\n",
+				 NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT);
+		else
+			dev_warn(priv->dev,
+				 "Invalid cmd-timeout-ms: %u, using default: %u\n",
+				 priv->cmd_timeout_ms,
+				 NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT);
+
+ priv->cmd_timeout_ms = NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT;
+ }
+
+ if (of_device_is_compatible(priv->dev->of_node,
+ "nuvoton,npcm750-peci")) {
+		priv->gcr_regmap =
+			syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+		if (!IS_ERR(priv->gcr_regmap)) {
+			bool volt = of_property_read_bool(priv->dev->of_node,
+							  "high-volt-range");
+
+			if (volt)
+ regmap_update_bits(priv->gcr_regmap,
+ NPCM7XX_INTCR3_OFFSET,
+ NPCM7XX_INTCR3_PECIVSEL,
+ NPCM7XX_INTCR3_PECIVSEL);
+ else
+ regmap_update_bits(priv->gcr_regmap,
+ NPCM7XX_INTCR3_OFFSET,
+ NPCM7XX_INTCR3_PECIVSEL, 0);
+ }
+ }
+
+ ret = of_property_read_u32(priv->dev->of_node, "pull-down",
+ &pull_down);
+ if (ret || pull_down > NPCM_PECI_PULL_DOWN_MAX) {
+		if (ret)
+			dev_warn(priv->dev,
+				 "pull-down not found, using default: %u\n",
+				 NPCM_PECI_PULL_DOWN_DEFAULT);
+		else
+			dev_warn(priv->dev,
+				 "Invalid pull-down: %u, using default: %u\n",
+				 pull_down,
+				 NPCM_PECI_PULL_DOWN_DEFAULT);
+ pull_down = NPCM_PECI_PULL_DOWN_DEFAULT;
+ }
+
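+	/* The pull-down selection occupies CTL2 bits [7:6] */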
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL2, NPCM_PECI_CTL2_MASK,
+ pull_down << 6);
+
+ ret = of_property_read_u32(priv->dev->of_node, "host-neg-bit-rate",
+ &host_neg_bit_rate);
+ if (ret || host_neg_bit_rate > NPCM_PECI_HOST_NEG_BIT_RATE_MAX ||
+ host_neg_bit_rate < NPCM_PECI_HOST_NEG_BIT_RATE_MIN) {
+		if (ret)
+			dev_warn(priv->dev,
+				 "host-neg-bit-rate not found, using default: %u\n",
+				 NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT);
+		else
+			dev_warn(priv->dev,
+				 "Invalid host-neg-bit-rate: %u, using default: %u\n",
+				 host_neg_bit_rate,
+				 NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT);
+ host_neg_bit_rate = NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT;
+ }
+
+ regmap_update_bits(priv->regmap, NPCM_PECI_PDDR, NPCM_PECI_PDDR_MASK,
+ host_neg_bit_rate);
+
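+	/*
+	 * The effective bit rate is the input clock divided by
+	 * 4 * (host_neg_bit_rate + 1); for example, an assumed 25 MHz
+	 * reference with the default value of 15 gives ~390 kHz.
+	 */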
+ priv->host_bit_rate = clk_get_rate(priv->clk) /
+ (4 * (host_neg_bit_rate + 1));
+
+ ret = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts,
+ !(cmd_sts & NPCM_PECI_CTRL_START_BUSY),
+ NPCM_PECI_IDLE_CHECK_INTERVAL_USEC,
+ NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC);
+ if (ret)
+ return ret; /* -ETIMEDOUT */
+
+ /* PECI interrupt enable */
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS,
+ NPCM_PECI_CTRL_DONE_INT_EN,
+ NPCM_PECI_CTRL_DONE_INT_EN);
+
+ return 0;
+}
+
+static const struct regmap_config npcm_peci_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = NPCM_PECI_MAX_REG,
+ .fast_io = true,
+};
+
+static int npcm_peci_xfer(struct peci_adapter *adapter,
+ struct peci_xfer_msg *msg)
+{
+ struct npcm_peci *priv = peci_get_adapdata(adapter);
+
+ return npcm_peci_xfer_native(priv, msg);
+}
+
+static int npcm_peci_probe(struct platform_device *pdev)
+{
+ struct peci_adapter *adapter;
+ struct npcm_peci *priv;
+ void __iomem *base;
+ int ret;
+
+ adapter = peci_alloc_adapter(&pdev->dev, sizeof(*priv));
+ if (!adapter)
+ return -ENOMEM;
+
+ priv = peci_get_adapdata(adapter);
+ priv->adapter = adapter;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_put_adapter_dev;
+ }
+
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &npcm_peci_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ goto err_put_adapter_dev;
+ }
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		ret = priv->irq;
+		goto err_put_adapter_dev;
+	}
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, npcm_peci_irq_handler,
+ 0, "peci-npcm-irq", priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ init_completion(&priv->xfer_complete);
+ spin_lock_init(&priv->lock);
+
+ priv->adapter->owner = THIS_MODULE;
+ priv->adapter->dev.of_node = of_node_get(dev_of_node(priv->dev));
+ strlcpy(priv->adapter->name, pdev->name, sizeof(priv->adapter->name));
+ priv->adapter->xfer = npcm_peci_xfer;
+
+ ret = npcm_peci_init_ctrl(priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ ret = peci_add_adapter(priv->adapter);
+ if (ret)
+ goto err_put_adapter_dev;
+
+	dev_info(&pdev->dev, "peci bus %d registered, host negotiation bit rate %dHz\n",
+ priv->adapter->nr, priv->host_bit_rate);
+
+ return 0;
+
+err_put_adapter_dev:
+ put_device(&adapter->dev);
+ return ret;
+}
+
+static int npcm_peci_remove(struct platform_device *pdev)
+{
+ struct npcm_peci *priv = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+ peci_del_adapter(priv->adapter);
+ of_node_put(priv->adapter->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_peci_of_table[] = {
+ { .compatible = "nuvoton,npcm750-peci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm_peci_of_table);
+
+static struct platform_driver npcm_peci_driver = {
+ .probe = npcm_peci_probe,
+ .remove = npcm_peci_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(npcm_peci_of_table),
+ },
+};
+module_platform_driver(npcm_peci_driver);
+
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_DESCRIPTION("NPCM Platform Environment Control Interface (PECI) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/peci/peci-core.c b/drivers/peci/peci-core.c
new file mode 100644
index 000000000000..9aedb74710e6
--- /dev/null
+++ b/drivers/peci/peci-core.c
@@ -0,0 +1,2089 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/crc8.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/peci.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched/task_stack.h>
+#include <linux/slab.h>
+
+/* Mask for getting the revision number from the DIB */
+#define REVISION_NUM_MASK GENMASK(15, 8)
+
+/* CRC8 table for Assured Write Frame Check */
+#define PECI_CRC8_POLYNOMIAL 0x07
+DECLARE_CRC8_TABLE(peci_crc8_table);
+
+static bool is_registered;
+
+static DEFINE_MUTEX(core_lock);
+static DEFINE_IDR(peci_adapter_idr);
+
+struct peci_adapter *peci_get_adapter(int nr)
+{
+ struct peci_adapter *adapter;
+
+ mutex_lock(&core_lock);
+ adapter = idr_find(&peci_adapter_idr, nr);
+ if (!adapter)
+ goto out_unlock;
+
+ if (try_module_get(adapter->owner))
+ get_device(&adapter->dev);
+ else
+ adapter = NULL;
+
+out_unlock:
+ mutex_unlock(&core_lock);
+
+ return adapter;
+}
+EXPORT_SYMBOL_GPL(peci_get_adapter);
+
+void peci_put_adapter(struct peci_adapter *adapter)
+{
+ if (!adapter)
+ return;
+
+ put_device(&adapter->dev);
+ module_put(adapter->owner);
+}
+EXPORT_SYMBOL_GPL(peci_put_adapter);
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", dev->type == &peci_client_type ?
+ to_peci_client(dev)->name : to_peci_adapter(dev)->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static void peci_client_dev_release(struct device *dev)
+{
+ struct peci_client *client = to_peci_client(dev);
+
+ dev_dbg(dev, "%s: %s\n", __func__, client->name);
+ peci_put_adapter(client->adapter);
+ kfree(client);
+}
+
+static struct attribute *peci_device_attrs[] = {
+ &dev_attr_name.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(peci_device);
+
+struct device_type peci_client_type = {
+ .groups = peci_device_groups,
+ .release = peci_client_dev_release,
+};
+EXPORT_SYMBOL_GPL(peci_client_type);
+
+/**
+ * peci_verify_client - return parameter as peci_client, or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * Return: pointer to peci_client on success, else NULL.
+ */
+struct peci_client *peci_verify_client(struct device *dev)
+{
+ return (dev->type == &peci_client_type)
+ ? to_peci_client(dev)
+ : NULL;
+}
+EXPORT_SYMBOL_GPL(peci_verify_client);
+
+/**
+ * peci_get_xfer_msg() - get a DMA safe peci_xfer_msg for the given tx and rx
+ * length
+ * @tx_len: the length of tx_buf. May be 0 if tx_buf isn't needed.
+ * @rx_len: the length of rx_buf. May be 0 if rx_buf isn't needed.
+ *
+ * Return: NULL if a DMA safe buffer was not obtained.
+ * Or a valid pointer to be used with DMA. After use, release it by
+ * calling peci_put_xfer_msg().
+ *
+ * This function must only be called from process context!
+ */
+struct peci_xfer_msg *peci_get_xfer_msg(u8 tx_len, u8 rx_len)
+{
+ struct peci_xfer_msg *msg;
+ u8 *tx_buf, *rx_buf;
+
+ if (tx_len) {
+ tx_buf = kzalloc(tx_len, GFP_KERNEL);
+ if (!tx_buf)
+ return NULL;
+ } else {
+ tx_buf = NULL;
+ }
+
+ if (rx_len) {
+ rx_buf = kzalloc(rx_len, GFP_KERNEL);
+ if (!rx_buf)
+ goto err_free_tx_buf;
+ } else {
+ rx_buf = NULL;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ goto err_free_tx_rx_buf;
+
+ msg->tx_len = tx_len;
+ msg->tx_buf = tx_buf;
+ msg->rx_len = rx_len;
+ msg->rx_buf = rx_buf;
+
+ return msg;
+
+err_free_tx_rx_buf:
+ kfree(rx_buf);
+err_free_tx_buf:
+ kfree(tx_buf);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(peci_get_xfer_msg);
+
+/**
+ * peci_put_xfer_msg - release a DMA safe peci_xfer_msg
+ * @msg: the message obtained from peci_get_xfer_msg(). May be NULL.
+ */
+void peci_put_xfer_msg(struct peci_xfer_msg *msg)
+{
+ if (!msg)
+ return;
+
+ kfree(msg->rx_buf);
+ kfree(msg->tx_buf);
+ kfree(msg);
+}
+EXPORT_SYMBOL_GPL(peci_put_xfer_msg);
+
+/* Calculate an Assured Write Frame Check Sequence byte */
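+/*
+ * The FCS is computed over the client address, the write and read length
+ * bytes, and the tx payload, using the CRC-8 polynomial 0x07 declared
+ * above.  Callers transmit the result with the most significant bit
+ * inverted, i.e. 0x80 ^ aw_fcs.
+ */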
+static int peci_aw_fcs(struct peci_xfer_msg *msg, int len, u8 *aw_fcs)
+{
+ u8 *tmp_buf;
+
+	/* Use a temporary buffer so the CRC runs over a contiguous byte array */
+ tmp_buf = kmalloc(len, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ tmp_buf[0] = msg->addr;
+ tmp_buf[1] = msg->tx_len;
+ tmp_buf[2] = msg->rx_len;
+ memcpy(&tmp_buf[3], msg->tx_buf, len - 3);
+
+ *aw_fcs = crc8(peci_crc8_table, tmp_buf, (size_t)len, 0);
+
+ kfree(tmp_buf);
+
+ return 0;
+}
+
+static int __peci_xfer(struct peci_adapter *adapter, struct peci_xfer_msg *msg,
+ bool do_retry, bool has_aw_fcs)
+{
+	unsigned int interval_ms = PECI_DEV_RETRY_INTERVAL_MIN_MSEC;
+ ulong timeout = jiffies;
+ u8 aw_fcs;
+ int ret;
+
+	/*
+	 * If the adapter uses DMA, check here whether the tx and rx buffers
+	 * are DMA capable.
+	 */
+ if (IS_ENABLED(CONFIG_HAS_DMA) && adapter->use_dma) {
+ if (is_vmalloc_addr(msg->tx_buf) ||
+ is_vmalloc_addr(msg->rx_buf)) {
+ WARN_ONCE(1, "xfer msg is not dma capable\n");
+ return -EAGAIN;
+ } else if (object_is_on_stack(msg->tx_buf) ||
+ object_is_on_stack(msg->rx_buf)) {
+ WARN_ONCE(1, "xfer msg is on stack\n");
+ return -EAGAIN;
+ }
+ }
+
+ /*
+ * For some commands, the PECI originator may need to retry a command if
+ * the processor PECI client responds with a 0x8x completion code. In
+ * each instance, the processor PECI client may have started the
+ * operation but not completed it yet. When the 'retry' bit is set, the
+ * PECI client will ignore a new request if it exactly matches a
+ * previous valid request. For better performance and for reducing
+ * retry traffic, the interval time will be increased exponentially.
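+	 * The wait interval starts at PECI_DEV_RETRY_INTERVAL_MIN_MSEC and
+	 * doubles after every attempt until it is capped at
+	 * PECI_DEV_RETRY_INTERVAL_MAX_MSEC or PECI_DEV_RETRY_TIMEOUT expires.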
+ */
+
+ if (do_retry)
+ timeout += PECI_DEV_RETRY_TIMEOUT;
+
+ for (;;) {
+ ret = adapter->xfer(adapter, msg);
+
+ if (!do_retry || ret || !msg->rx_buf)
+ break;
+
+ /* Retry is needed when completion code is 0x8x */
+ if ((msg->rx_buf[0] & PECI_DEV_CC_RETRY_CHECK_MASK) !=
+ PECI_DEV_CC_NEED_RETRY)
+ break;
+
+ /* Set the retry bit to indicate a retry attempt */
+ msg->tx_buf[1] |= PECI_DEV_RETRY_BIT;
+
+ /* Recalculate the AW FCS if it has one */
+ if (has_aw_fcs) {
+ ret = peci_aw_fcs(msg, 2 + msg->tx_len, &aw_fcs);
+ if (ret)
+ break;
+
+ msg->tx_buf[msg->tx_len - 1] = 0x80 ^ aw_fcs;
+ }
+
+ /* Retry it for 'timeout' before returning an error. */
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(&adapter->dev, "Timeout retrying xfer!\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(msecs_to_jiffies(interval_ms))) {
+ ret = -EINTR;
+ break;
+ }
+
+ interval_ms *= 2;
+ if (interval_ms > PECI_DEV_RETRY_INTERVAL_MAX_MSEC)
+ interval_ms = PECI_DEV_RETRY_INTERVAL_MAX_MSEC;
+ }
+
+ if (ret)
+ dev_dbg(&adapter->dev, "xfer error: %d\n", ret);
+
+ return ret;
+}
+
+static int peci_xfer(struct peci_adapter *adapter, struct peci_xfer_msg *msg)
+{
+ return __peci_xfer(adapter, msg, false, false);
+}
+
+static int peci_xfer_with_retries(struct peci_adapter *adapter,
+ struct peci_xfer_msg *msg,
+ bool has_aw_fcs)
+{
+ return __peci_xfer(adapter, msg, true, has_aw_fcs);
+}
+
+static int peci_scan_cmd_mask(struct peci_adapter *adapter)
+{
+ struct peci_xfer_msg *msg;
+ u8 revision;
+ int ret;
+ u64 dib;
+
+ /* Update command mask just once */
+ if (adapter->cmd_mask & BIT(PECI_CMD_XFER))
+ return 0;
+
+ msg = peci_get_xfer_msg(PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = PECI_BASE_ADDR;
+ msg->tx_buf[0] = PECI_GET_DIB_CMD;
+
+	ret = peci_xfer(adapter, msg);
+	if (ret)
+		goto out;
+
+ dib = le64_to_cpup((__le64 *)msg->rx_buf);
+
+ /* Check special case for Get DIB command */
+ if (dib == 0) {
+ dev_dbg(&adapter->dev, "DIB read as 0\n");
+ ret = -EIO;
+ goto out;
+ }
+
+	/*
+	 * Set up the supported commands based on the revision number.
+	 * See PECI Spec Table 3-1.
+	 */
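+	/* The revision byte packs major.minor as nibbles, e.g. 0x36 is 3.6 */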
+ revision = FIELD_GET(REVISION_NUM_MASK, dib);
+ if (revision >= 0x40) { /* Rev. 4.0 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_IA_MSREX);
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_END_PT_CFG);
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_END_PT_CFG);
+ adapter->cmd_mask |= BIT(PECI_CMD_CRASHDUMP_DISC);
+ adapter->cmd_mask |= BIT(PECI_CMD_CRASHDUMP_GET_FRAME);
+ }
+ if (revision >= 0x36) /* Rev. 3.6 */
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_IA_MSR);
+ if (revision >= 0x35) /* Rev. 3.5 */
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_PCI_CFG);
+ if (revision >= 0x34) /* Rev. 3.4 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_PCI_CFG);
+ if (revision >= 0x33) { /* Rev. 3.3 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_PCI_CFG_LOCAL);
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_PCI_CFG_LOCAL);
+ }
+ if (revision >= 0x32) /* Rev. 3.2 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_IA_MSR);
+ if (revision >= 0x31) { /* Rev. 3.1 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_PKG_CFG);
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_PKG_CFG);
+ }
+
+ adapter->cmd_mask |= BIT(PECI_CMD_XFER);
+ adapter->cmd_mask |= BIT(PECI_CMD_GET_TEMP);
+ adapter->cmd_mask |= BIT(PECI_CMD_GET_DIB);
+ adapter->cmd_mask |= BIT(PECI_CMD_PING);
+
+out:
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_check_cmd_support(struct peci_adapter *adapter,
+ enum peci_cmd cmd)
+{
+ if (!(adapter->cmd_mask & BIT(PECI_CMD_PING)) &&
+ peci_scan_cmd_mask(adapter) < 0) {
+ dev_dbg(&adapter->dev, "Failed to scan command mask\n");
+ return -EIO;
+ }
+
+ if (!(adapter->cmd_mask & BIT(cmd))) {
+ dev_dbg(&adapter->dev, "Command %d is not supported\n", cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int peci_cmd_xfer(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_xfer_msg *msg = vmsg;
+ u8 aw_fcs;
+ int ret;
+
+ if (!msg->tx_len) {
+ ret = peci_xfer(adapter, msg);
+ } else {
+ switch (msg->tx_buf[0]) {
+ case PECI_RDPKGCFG_CMD:
+ case PECI_RDIAMSR_CMD:
+ case PECI_RDIAMSREX_CMD:
+ case PECI_RDPCICFG_CMD:
+ case PECI_RDPCICFGLOCAL_CMD:
+ case PECI_RDENDPTCFG_CMD:
+ case PECI_CRASHDUMP_CMD:
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ break;
+ case PECI_WRPKGCFG_CMD:
+ case PECI_WRIAMSR_CMD:
+ case PECI_WRPCICFG_CMD:
+ case PECI_WRPCICFGLOCAL_CMD:
+ case PECI_WRENDPTCFG_CMD:
+ /* Check if the AW FCS byte is already provided */
+ ret = peci_aw_fcs(msg, 2 + msg->tx_len, &aw_fcs);
+ if (ret)
+ break;
+
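+			/*
+			 * A matching trailing byte means the caller already
+			 * appended the AW FCS; otherwise add one here.
+			 */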
+ if (msg->tx_buf[msg->tx_len - 1] != (0x80 ^ aw_fcs)) {
+ /*
+ * Add an Assured Write Frame Check Sequence
+ * byte and increment the tx_len to include
+ * the new byte.
+ */
+ msg->tx_len++;
+ ret = peci_aw_fcs(msg, 2 + msg->tx_len,
+ &aw_fcs);
+ if (ret)
+ break;
+
+ msg->tx_buf[msg->tx_len - 1] = 0x80 ^ aw_fcs;
+ }
+
+ ret = peci_xfer_with_retries(adapter, msg, true);
+ break;
+ case PECI_GET_DIB_CMD:
+ case PECI_GET_TEMP_CMD:
+ default:
+ ret = peci_xfer(adapter, msg);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int peci_cmd_ping(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_ping_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(0, 0);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+
+ ret = peci_xfer(adapter, msg);
+
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_get_dib(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_get_dib_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_GET_DIB_CMD;
+
+ ret = peci_xfer(adapter, msg);
+ if (ret)
+ goto out;
+
+ umsg->dib = le64_to_cpup((__le64 *)msg->rx_buf);
+
+out:
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_get_temp(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_get_temp_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_GET_TEMP_CMD;
+
+ ret = peci_xfer(adapter, msg);
+ if (ret)
+ goto out;
+
+ umsg->temp_raw = le16_to_cpup((__le16 *)msg->rx_buf);
+
+out:
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_pkg_cfg(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_rd_pkg_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ /* Per the PECI spec, the read length must be a byte, word, or dword */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 && umsg->rx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_RDPKGCFG_WRITE_LEN,
+ PECI_RDPKGCFG_READ_LEN_BASE + umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDPKGCFG_CMD;
+ msg->tx_buf[1] = 0; /* request byte for Host ID | Retry bit */
+ /* Host ID is 0 for PECI 3.0 */
+ msg->tx_buf[2] = umsg->index; /* RdPkgConfig index */
+ msg->tx_buf[3] = (u8)umsg->param; /* LSB - Config parameter */
+ msg->tx_buf[4] = (u8)(umsg->param >> 8); /* MSB - Config parameter */
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->pkg_config, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_pkg_cfg(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_wr_pkg_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret, i;
+ u8 aw_fcs;
+
+ /* Per the PECI spec, the write length must be a dword */
+ if (umsg->tx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_WRPKGCFG_WRITE_LEN_BASE + umsg->tx_len,
+ PECI_WRPKGCFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRPKGCFG_CMD;
+ msg->tx_buf[1] = 0; /* request byte for Host ID | Retry bit */
+ /* Host ID is 0 for PECI 3.0 */
+	msg->tx_buf[2] = umsg->index; /* WrPkgConfig index */
+ msg->tx_buf[3] = (u8)umsg->param; /* LSB - Config parameter */
+ msg->tx_buf[4] = (u8)(umsg->param >> 8); /* MSB - Config parameter */
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[5 + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
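+	/*
+	 * 8 + tx_len covers the client address and length bytes plus the
+	 * 5-byte command header and the data bytes, i.e. everything except
+	 * the FCS slot itself.
+	 */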
+ ret = peci_aw_fcs(msg, 8 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[5 + i] = 0x80 ^ aw_fcs;
+
+ ret = peci_xfer_with_retries(adapter, msg, true);
+
+out:
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_ia_msr(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_rd_ia_msr_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(PECI_RDIAMSR_WRITE_LEN, PECI_RDIAMSR_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDIAMSR_CMD;
+ msg->tx_buf[1] = 0;
+ msg->tx_buf[2] = umsg->thread_id;
+ msg->tx_buf[3] = (u8)umsg->address;
+ msg->tx_buf[4] = (u8)(umsg->address >> 8);
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+		memcpy(&umsg->value, &msg->rx_buf[1], sizeof(u64));
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_ia_msrex(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_rd_ia_msrex_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(PECI_RDIAMSREX_WRITE_LEN,
+ PECI_RDIAMSREX_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDIAMSREX_CMD;
+ msg->tx_buf[1] = 0;
+ msg->tx_buf[2] = (u8)umsg->thread_id;
+ msg->tx_buf[3] = (u8)(umsg->thread_id >> 8);
+ msg->tx_buf[4] = (u8)umsg->address;
+ msg->tx_buf[5] = (u8)(umsg->address >> 8);
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+		memcpy(&umsg->value, &msg->rx_buf[1], sizeof(u64));
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_ia_msr(struct peci_adapter *adapter, void *vmsg)
+{
+ return -ENOSYS; /* Not implemented yet */
+}
+
+static int peci_cmd_rd_pci_cfg(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_rd_pci_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u32 address;
+ int ret;
+
+ msg = peci_get_xfer_msg(PECI_RDPCICFG_WRITE_LEN,
+ PECI_RDPCICFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->reg; /* [11:0] - Register */
+ address |= (u32)umsg->function << 12; /* [14:12] - Function */
+ address |= (u32)umsg->device << 15; /* [19:15] - Device */
+ address |= (u32)umsg->bus << 20; /* [27:20] - Bus */
+ /* [31:28] - Reserved */
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDPCICFG_CMD;
+ msg->tx_buf[1] = 0; /* request byte for Host ID | Retry bit */
+ /* Host ID is 0 for PECI 3.0 */
+ msg->tx_buf[2] = (u8)address; /* LSB - PCI Config Address */
+ msg->tx_buf[3] = (u8)(address >> 8); /* PCI Config Address */
+ msg->tx_buf[4] = (u8)(address >> 16); /* PCI Config Address */
+ msg->tx_buf[5] = (u8)(address >> 24); /* MSB - PCI Config Address */
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->pci_config, &msg->rx_buf[1], 4);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_pci_cfg(struct peci_adapter *adapter, void *vmsg)
+{
+ return -ENOSYS; /* Not implemented yet */
+}
+
+static int peci_cmd_rd_pci_cfg_local(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_rd_pci_cfg_local_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u32 address;
+ int ret;
+
+ /* Per the PECI spec, the read length must be a byte, word, or dword */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 && umsg->rx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_RDPCICFGLOCAL_WRITE_LEN,
+ PECI_RDPCICFGLOCAL_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->reg; /* [11:0] - Register */
+ address |= (u32)umsg->function << 12; /* [14:12] - Function */
+ address |= (u32)umsg->device << 15; /* [19:15] - Device */
+ address |= (u32)umsg->bus << 20; /* [23:20] - Bus */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDPCICFGLOCAL_CMD;
+ msg->tx_buf[1] = 0; /* request byte for Host ID | Retry bit */
+ /* Host ID is 0 for PECI 3.0 */
+ msg->tx_buf[2] = (u8)address; /* LSB - PCI Configuration Address */
+ msg->tx_buf[3] = (u8)(address >> 8); /* PCI Configuration Address */
+ msg->tx_buf[4] = (u8)(address >> 16); /* PCI Configuration Address */
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->pci_config, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_pci_cfg_local(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_wr_pci_cfg_local_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u32 address;
+ int ret, i;
+ u8 aw_fcs;
+
+ /* Per the PECI spec, the write length must be a byte, word, or dword */
+ if (umsg->tx_len != 1 && umsg->tx_len != 2 && umsg->tx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_WRPCICFGLOCAL_WRITE_LEN_BASE +
+ umsg->tx_len, PECI_WRPCICFGLOCAL_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->reg; /* [11:0] - Register */
+ address |= (u32)umsg->function << 12; /* [14:12] - Function */
+ address |= (u32)umsg->device << 15; /* [19:15] - Device */
+ address |= (u32)umsg->bus << 20; /* [23:20] - Bus */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRPCICFGLOCAL_CMD;
+ msg->tx_buf[1] = 0; /* request byte for Host ID | Retry bit */
+ /* Host ID is 0 for PECI 3.0 */
+ msg->tx_buf[2] = (u8)address; /* LSB - PCI Configuration Address */
+ msg->tx_buf[3] = (u8)(address >> 8); /* PCI Configuration Address */
+ msg->tx_buf[4] = (u8)(address >> 16); /* PCI Configuration Address */
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[5 + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
+ ret = peci_aw_fcs(msg, 8 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[5 + i] = 0x80 ^ aw_fcs;
+
+ ret = peci_xfer_with_retries(adapter, msg, true);
+
+out:
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_end_pt_cfg(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_rd_end_pt_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg = NULL;
+ u32 address;
+ u8 tx_size;
+ int ret;
+
+ switch (umsg->msg_type) {
+ case PECI_ENDPTCFG_TYPE_LOCAL_PCI:
+ case PECI_ENDPTCFG_TYPE_PCI:
+ /*
+ * Per the PECI spec, the read length must be a byte, word,
+ * or dword
+ */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 &&
+ umsg->rx_len != 4) {
+ dev_dbg(&adapter->dev,
+ "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_RDENDPTCFG_PCI_WRITE_LEN,
+ PECI_RDENDPTCFG_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.pci_cfg.reg; /* [11:0] - Register */
+ address |= (u32)umsg->params.pci_cfg.function
+ << 12; /* [14:12] - Function */
+ address |= (u32)umsg->params.pci_cfg.device
+ << 15; /* [19:15] - Device */
+ address |= (u32)umsg->params.pci_cfg.bus
+ << 20; /* [27:20] - Bus */
+ /* [31:28] - Reserved */
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDENDPTCFG_CMD;
+ msg->tx_buf[1] = 0x00; /* request byte for Host ID|Retry bit */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = 0x00; /* Reserved */
+ msg->tx_buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI; /* Addr Type */
+ msg->tx_buf[7] = umsg->params.pci_cfg.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* LSB - PCI Config Address */
+ msg->tx_buf[9] = (u8)(address >> 8); /* PCI Config Address */
+ msg->tx_buf[10] = (u8)(address >> 16); /* PCI Config Address */
+ msg->tx_buf[11] =
+ (u8)(address >> 24); /* MSB - PCI Config Address */
+ break;
+
+ case PECI_ENDPTCFG_TYPE_MMIO:
+ /*
+ * Per the PECI spec, the read length must be a byte, word,
+ * dword, or qword
+ */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 &&
+ umsg->rx_len != 4 && umsg->rx_len != 8) {
+ dev_dbg(&adapter->dev,
+ "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+ /*
+ * Per the PECI spec, the address type must specify either DWORD
+ * or QWORD
+ */
+ if (umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D &&
+ umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ dev_dbg(&adapter->dev,
+ "Invalid address type, addr_type: %d\n",
+ umsg->params.mmio.addr_type);
+ return -EINVAL;
+ }
+
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
+ tx_size = PECI_RDENDPTCFG_MMIO_D_WRITE_LEN;
+ else
+ tx_size = PECI_RDENDPTCFG_MMIO_Q_WRITE_LEN;
+ msg = peci_get_xfer_msg(tx_size,
+ PECI_RDENDPTCFG_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.mmio.function; /* [2:0] - Function */
+ address |= (u32)umsg->params.mmio.device
+ << 3; /* [7:3] - Device */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDENDPTCFG_CMD;
+ msg->tx_buf[1] = 0x00; /* request byte for Host ID|Retry bit */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = umsg->params.mmio.bar; /* BAR # */
+ msg->tx_buf[6] = umsg->params.mmio.addr_type; /* Address Type */
+ msg->tx_buf[7] = umsg->params.mmio.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* Function/Device */
+ msg->tx_buf[9] = umsg->params.mmio.bus; /* PCI Bus */
+ msg->tx_buf[10] = (u8)umsg->params.mmio
+ .offset; /* LSB - Register Offset */
+ msg->tx_buf[11] = (u8)(umsg->params.mmio.offset
+ >> 8); /* Register Offset */
+ msg->tx_buf[12] = (u8)(umsg->params.mmio.offset
+ >> 16); /* Register Offset */
+ msg->tx_buf[13] = (u8)(umsg->params.mmio.offset
+ >> 24); /* MSB - DWORD Register Offset */
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ msg->tx_buf[14] = (u8)(umsg->params.mmio.offset
+ >> 32); /* Register Offset */
+ msg->tx_buf[15] = (u8)(umsg->params.mmio.offset
+ >> 40); /* Register Offset */
+ msg->tx_buf[16] = (u8)(umsg->params.mmio.offset
+ >> 48); /* Register Offset */
+ msg->tx_buf[17] =
+ (u8)(umsg->params.mmio.offset
+ >> 56); /* MSB - QWORD Register Offset */
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->data, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_end_pt_cfg(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_wr_end_pt_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg = NULL;
+ u8 tx_size, aw_fcs;
+ int ret, i, idx;
+ u32 address;
+
+ switch (umsg->msg_type) {
+ case PECI_ENDPTCFG_TYPE_LOCAL_PCI:
+ case PECI_ENDPTCFG_TYPE_PCI:
+ /*
+ * Per the PECI spec, the write length must be a byte, word,
+ * or dword
+ */
+ if (umsg->tx_len != 1 && umsg->tx_len != 2 &&
+ umsg->tx_len != 4) {
+ dev_dbg(&adapter->dev,
+ "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_WRENDPTCFG_PCI_WRITE_LEN_BASE +
+ umsg->tx_len, PECI_WRENDPTCFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.pci_cfg.reg; /* [11:0] - Register */
+ address |= (u32)umsg->params.pci_cfg.function
+ << 12; /* [14:12] - Function */
+ address |= (u32)umsg->params.pci_cfg.device
+ << 15; /* [19:15] - Device */
+ address |= (u32)umsg->params.pci_cfg.bus
+ << 20; /* [27:20] - Bus */
+ /* [31:28] - Reserved */
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRENDPTCFG_CMD;
+ msg->tx_buf[1] = 0x00; /* request byte for Host ID|Retry bit */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = 0x00; /* Reserved */
+ msg->tx_buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI; /* Addr Type */
+ msg->tx_buf[7] = umsg->params.pci_cfg.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* LSB - PCI Config Address */
+ msg->tx_buf[9] = (u8)(address >> 8); /* PCI Config Address */
+ msg->tx_buf[10] = (u8)(address >> 16); /* PCI Config Address */
+ msg->tx_buf[11] =
+ (u8)(address >> 24); /* MSB - PCI Config Address */
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[12 + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
+ ret = peci_aw_fcs(msg, 15 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[12 + i] = 0x80 ^ aw_fcs;
+ break;
+
+ case PECI_ENDPTCFG_TYPE_MMIO:
+ /*
+ * Per the PECI spec, the write length must be a byte, word,
+ * dword, or qword
+ */
+ if (umsg->tx_len != 1 && umsg->tx_len != 2 &&
+ umsg->tx_len != 4 && umsg->tx_len != 8) {
+ dev_dbg(&adapter->dev,
+ "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+ /*
+ * Per the PECI spec, the address type must specify either DWORD
+ * or QWORD
+ */
+ if (umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D &&
+ umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ dev_dbg(&adapter->dev,
+ "Invalid address type, addr_type: %d\n",
+ umsg->params.mmio.addr_type);
+ return -EINVAL;
+ }
+
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
+ tx_size = PECI_WRENDPTCFG_MMIO_D_WRITE_LEN_BASE +
+ umsg->tx_len;
+ else
+ tx_size = PECI_WRENDPTCFG_MMIO_Q_WRITE_LEN_BASE +
+ umsg->tx_len;
+ msg = peci_get_xfer_msg(tx_size, PECI_WRENDPTCFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.mmio.function; /* [2:0] - Function */
+ address |= (u32)umsg->params.mmio.device
+ << 3; /* [7:3] - Device */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRENDPTCFG_CMD;
+ msg->tx_buf[1] = 0x00; /* request byte for Host ID|Retry bit */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = umsg->params.mmio.bar; /* BAR # */
+ msg->tx_buf[6] = umsg->params.mmio.addr_type; /* Address Type */
+ msg->tx_buf[7] = umsg->params.mmio.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* Function/Device */
+ msg->tx_buf[9] = umsg->params.mmio.bus; /* PCI Bus */
+ msg->tx_buf[10] = (u8)umsg->params.mmio
+ .offset; /* LSB - Register Offset */
+ msg->tx_buf[11] = (u8)(umsg->params.mmio.offset
+ >> 8); /* Register Offset */
+ msg->tx_buf[12] = (u8)(umsg->params.mmio.offset
+ >> 16); /* Register Offset */
+ msg->tx_buf[13] = (u8)(umsg->params.mmio.offset
+ >> 24); /* MSB - DWORD Register Offset */
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ msg->tx_buf[14] = (u8)(umsg->params.mmio.offset
+ >> 32); /* Register Offset */
+ msg->tx_buf[15] = (u8)(umsg->params.mmio.offset
+ >> 40); /* Register Offset */
+ msg->tx_buf[16] = (u8)(umsg->params.mmio.offset
+ >> 48); /* Register Offset */
+ msg->tx_buf[17] =
+ (u8)(umsg->params.mmio.offset
+ >> 56); /* MSB - QWORD Register Offset */
+ idx = 18;
+ } else {
+ idx = 14;
+ }
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[idx + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
+ ret = peci_aw_fcs(msg, idx + 3 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[idx + i] = 0x80 ^ aw_fcs;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+
+out:
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_crashdump_disc(struct peci_adapter *adapter, void *vmsg)
+{
+ struct peci_crashdump_disc_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ /* Per the EDS, the read length must be a byte, word, or qword */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 && umsg->rx_len != 8) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_CRASHDUMP_DISC_WRITE_LEN,
+ PECI_CRASHDUMP_DISC_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_CRASHDUMP_CMD;
+ msg->tx_buf[1] = 0x00; /* request byte for Host ID | Retry bit */
+ /* Host ID is 0 for PECI 3.0 */
+ msg->tx_buf[2] = PECI_CRASHDUMP_DISC_VERSION;
+ msg->tx_buf[3] = PECI_CRASHDUMP_DISC_OPCODE;
+ msg->tx_buf[4] = umsg->subopcode;
+ msg->tx_buf[5] = umsg->param0;
+ msg->tx_buf[6] = (u8)umsg->param1;
+ msg->tx_buf[7] = (u8)(umsg->param1 >> 8);
+ msg->tx_buf[8] = umsg->param2;
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->data, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_crashdump_get_frame(struct peci_adapter *adapter,
+ void *vmsg)
+{
+ struct peci_crashdump_get_frame_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ /* Per the EDS, the read length must be a qword or dqword */
+ if (umsg->rx_len != 8 && umsg->rx_len != 16) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_CRASHDUMP_GET_FRAME_WRITE_LEN,
+ PECI_CRASHDUMP_GET_FRAME_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_CRASHDUMP_CMD;
+ msg->tx_buf[1] = 0x00; /* request byte for Host ID | Retry bit */
+ /* Host ID is 0 for PECI 3.0 */
+ msg->tx_buf[2] = PECI_CRASHDUMP_GET_FRAME_VERSION;
+ msg->tx_buf[3] = PECI_CRASHDUMP_GET_FRAME_OPCODE;
+ msg->tx_buf[4] = (u8)umsg->param0;
+ msg->tx_buf[5] = (u8)(umsg->param0 >> 8);
+ msg->tx_buf[6] = (u8)umsg->param1;
+ msg->tx_buf[7] = (u8)(umsg->param1 >> 8);
+ msg->tx_buf[8] = (u8)umsg->param2;
+ msg->tx_buf[9] = (u8)(umsg->param2 >> 8);
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->data, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+typedef int (*peci_cmd_fn_type)(struct peci_adapter *, void *);
+
+static const peci_cmd_fn_type peci_cmd_fn[PECI_CMD_MAX] = {
+ peci_cmd_xfer,
+ peci_cmd_ping,
+ peci_cmd_get_dib,
+ peci_cmd_get_temp,
+ peci_cmd_rd_pkg_cfg,
+ peci_cmd_wr_pkg_cfg,
+ peci_cmd_rd_ia_msr,
+ peci_cmd_wr_ia_msr,
+ peci_cmd_rd_ia_msrex,
+ peci_cmd_rd_pci_cfg,
+ peci_cmd_wr_pci_cfg,
+ peci_cmd_rd_pci_cfg_local,
+ peci_cmd_wr_pci_cfg_local,
+ peci_cmd_rd_end_pt_cfg,
+ peci_cmd_wr_end_pt_cfg,
+ peci_cmd_crashdump_disc,
+ peci_cmd_crashdump_get_frame,
+};
+
+/**
+ * peci_command - transfer function of a PECI command
+ * @adapter: pointer to peci_adapter
+ * @cmd: PECI command to execute
+ * @vmsg: pointer to PECI messages
+ * Context: can sleep
+ *
+ * This performs a transfer of a PECI command using a message parameter
+ * whose format varies from command to command.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_command(struct peci_adapter *adapter, enum peci_cmd cmd, void *vmsg)
+{
+ int ret;
+
+ if (cmd >= PECI_CMD_MAX || cmd < PECI_CMD_XFER)
+ return -ENOTTY;
+
+ dev_dbg(&adapter->dev, "%s, cmd=0x%02x\n", __func__, cmd);
+
+ if (!peci_cmd_fn[cmd])
+ return -EINVAL;
+
+ mutex_lock(&adapter->bus_lock);
+
+ ret = peci_check_cmd_support(adapter, cmd);
+ if (!ret)
+ ret = peci_cmd_fn[cmd](adapter, vmsg);
+
+ mutex_unlock(&adapter->bus_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(peci_command);
+
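+/* Probe for a client at @addr by issuing a Ping() command */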
+static int peci_detect(struct peci_adapter *adapter, u8 addr)
+{
+ struct peci_ping_msg msg;
+
+ msg.addr = addr;
+
+ return peci_command(adapter, PECI_CMD_PING, &msg);
+}
+
+static const struct of_device_id *
+peci_of_match_device(const struct of_device_id *matches,
+ struct peci_client *client)
+{
+#if IS_ENABLED(CONFIG_OF)
+ if (!(client && matches))
+ return NULL;
+
+ return of_match_device(matches, &client->dev);
+#else /* CONFIG_OF */
+ return NULL;
+#endif /* CONFIG_OF */
+}
+
+static const struct peci_device_id *
+peci_match_id(const struct peci_device_id *id, struct peci_client *client)
+{
+ if (!(id && client))
+ return NULL;
+
+ while (id->name[0]) {
+ if (!strncmp(client->name, id->name, PECI_NAME_SIZE))
+ return id;
+ id++;
+ }
+
+ return NULL;
+}
+
+static int peci_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+
+ /* Attempt an OF style match */
+ if (peci_of_match_device(drv->of_match_table, client))
+ return 1;
+
+ driver = to_peci_driver(drv);
+
+ /* Finally an ID match */
+ if (peci_match_id(driver->id_table, client))
+ return 1;
+
+ return 0;
+}
+
+static int peci_device_probe(struct device *dev)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+ int status = -EINVAL;
+
+ if (!client)
+ return 0;
+
+ driver = to_peci_driver(dev->driver);
+
+ if (!driver->id_table &&
+ !peci_of_match_device(dev->driver->of_match_table, client))
+ return -ENODEV;
+
+ dev_dbg(dev, "%s: name:%s\n", __func__, client->name);
+
+ status = dev_pm_domain_attach(&client->dev, true);
+ if (status == -EPROBE_DEFER)
+ return status;
+
+ if (driver->probe)
+ status = driver->probe(client);
+ else
+ status = -EINVAL;
+
+ if (status)
+ goto err_detach_pm_domain;
+
+ return 0;
+
+err_detach_pm_domain:
+ dev_pm_domain_detach(&client->dev, true);
+
+ return status;
+}
+
+static int peci_device_remove(struct device *dev)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+ int status = 0;
+
+ if (!client || !dev->driver)
+ return 0;
+
+ driver = to_peci_driver(dev->driver);
+ if (driver->remove) {
+ dev_dbg(dev, "%s: name:%s\n", __func__, client->name);
+ status = driver->remove(client);
+ }
+
+ dev_pm_domain_detach(&client->dev, true);
+
+ return status;
+}
+
+static void peci_device_shutdown(struct device *dev)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+
+ if (!client || !dev->driver)
+ return;
+
+ dev_dbg(dev, "%s: name:%s\n", __func__, client->name);
+
+ driver = to_peci_driver(dev->driver);
+ if (driver->shutdown)
+ driver->shutdown(client);
+}
+
+struct bus_type peci_bus_type = {
+ .name = "peci",
+ .match = peci_device_match,
+ .probe = peci_device_probe,
+ .remove = peci_device_remove,
+ .shutdown = peci_device_shutdown,
+};
+EXPORT_SYMBOL_GPL(peci_bus_type);
+
+static int peci_check_addr_validity(u8 addr)
+{
+	if (addr < PECI_BASE_ADDR || addr > PECI_BASE_ADDR + PECI_OFFSET_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int peci_check_client_busy(struct device *dev, void *client_new_p)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_client *client_new = client_new_p;
+
+ if (client && client->addr == client_new->addr)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * peci_get_cpu_id - read CPU ID from the Package Configuration Space of CPU
+ * @adapter: pointer to peci_adapter
+ * @addr: address of the PECI client CPU
+ * @cpu_id: where the CPU ID will be stored
+ * Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_get_cpu_id(struct peci_adapter *adapter, u8 addr, u32 *cpu_id)
+{
+ struct peci_rd_pkg_cfg_msg msg;
+ int ret;
+
+ msg.addr = addr;
+ msg.index = PECI_MBX_INDEX_CPU_ID;
+ msg.param = PECI_PKG_ID_CPU_ID;
+ msg.rx_len = 4;
+
+ ret = peci_command(adapter, PECI_CMD_RD_PKG_CFG, &msg);
+	if (!ret && msg.cc != PECI_DEV_CC_SUCCESS)
+		ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ *cpu_id = le32_to_cpup((__le32 *)msg.pkg_config);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(peci_get_cpu_id);
+
+static struct peci_client *peci_new_device(struct peci_adapter *adapter,
+ struct peci_board_info const *info)
+{
+ struct peci_client *client;
+ int ret;
+
+	/* Take a reference on the assigned adapter */
+ if (!peci_get_adapter(adapter->nr))
+ return NULL;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ goto err_put_adapter;
+
+ client->adapter = adapter;
+ client->addr = info->addr;
+ strlcpy(client->name, info->type, sizeof(client->name));
+
+ ret = peci_check_addr_validity(client->addr);
+ if (ret) {
+ dev_err(&adapter->dev, "Invalid PECI CPU address 0x%02hx\n",
+ client->addr);
+ goto err_free_client_silent;
+ }
+
+ /* Check online status of client */
+ ret = peci_detect(adapter, client->addr);
+ if (ret)
+ goto err_free_client;
+
+ ret = device_for_each_child(&adapter->dev, client,
+ peci_check_client_busy);
+ if (ret)
+ goto err_free_client;
+
+ client->dev.parent = &client->adapter->dev;
+ client->dev.bus = &peci_bus_type;
+ client->dev.type = &peci_client_type;
+ client->dev.of_node = of_node_get(info->of_node);
+ dev_set_name(&client->dev, "%d-%02x", adapter->nr, client->addr);
+
+ ret = device_register(&client->dev);
+ if (ret)
+ goto err_put_of_node;
+
+ dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
+ client->name, dev_name(&client->dev));
+
+ return client;
+
+err_put_of_node:
+ of_node_put(info->of_node);
+err_free_client:
+ dev_err(&adapter->dev,
+ "Failed to register peci client %s at 0x%02x (%d)\n",
+ client->name, client->addr, ret);
+err_free_client_silent:
+ kfree(client);
+err_put_adapter:
+ peci_put_adapter(adapter);
+
+ return NULL;
+}
+
+static void peci_unregister_device(struct peci_client *client)
+{
+ if (!client)
+ return;
+
+ if (client->dev.of_node) {
+ of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ of_node_put(client->dev.of_node);
+ }
+
+ device_unregister(&client->dev);
+}
+
+static int peci_unregister_client(struct device *dev, void *dummy)
+{
+ struct peci_client *client = peci_verify_client(dev);
+
+ peci_unregister_device(client);
+
+ return 0;
+}
+
+static void peci_adapter_dev_release(struct device *dev)
+{
+ struct peci_adapter *adapter = to_peci_adapter(dev);
+
+ dev_dbg(dev, "%s: %s\n", __func__, adapter->name);
+ mutex_destroy(&adapter->userspace_clients_lock);
+ mutex_destroy(&adapter->bus_lock);
+ kfree(adapter);
+}
+
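+/*
+ * Expected input is "<type> <addr>"; e.g. writing "my-client 0x30" (names
+ * here are illustrative) instantiates a client named "my-client" at PECI
+ * address 0x30 on this adapter.
+ */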
+static ssize_t peci_sysfs_new_device(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct peci_adapter *adapter = to_peci_adapter(dev);
+ struct peci_board_info info = {};
+ struct peci_client *client;
+ char *blank, end;
+ short addr;
+ int ret;
+
+ /* Parse device type */
+ blank = strchr(buf, ' ');
+ if (!blank) {
+ dev_err(dev, "%s: Missing parameters\n", "new_device");
+ return -EINVAL;
+ }
+ if (blank - buf > PECI_NAME_SIZE - 1) {
+ dev_err(dev, "%s: Invalid device type\n", "new_device");
+ return -EINVAL;
+ }
+ memcpy(info.type, buf, blank - buf);
+
+ /* Parse remaining parameters, reject extra parameters */
+ ret = sscanf(++blank, "%hi%c", &addr, &end);
+ if (ret < 1) {
+ dev_err(dev, "%s: Can't parse client address\n", "new_device");
+ return -EINVAL;
+ }
+ if (ret > 1 && end != '\n') {
+ dev_err(dev, "%s: Extra parameters\n", "new_device");
+ return -EINVAL;
+ }
+
+ info.addr = (u8)addr;
+ client = peci_new_device(adapter, &info);
+ if (!client)
+ return -EINVAL;
+
+ /* Keep track of the added device */
+ mutex_lock(&adapter->userspace_clients_lock);
+ list_add_tail(&client->detected, &adapter->userspace_clients);
+ mutex_unlock(&adapter->userspace_clients_lock);
+ dev_dbg(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
+ info.type, info.addr);
+
+ return count;
+}
+static DEVICE_ATTR(new_device, 0200, NULL, peci_sysfs_new_device);
+
+static ssize_t peci_sysfs_delete_device(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct peci_adapter *adapter = to_peci_adapter(dev);
+ struct peci_client *client, *next;
+ struct peci_board_info info = {};
+ char *blank, end;
+ short addr;
+ int ret;
+
+ /* Parse device type */
+ blank = strchr(buf, ' ');
+ if (!blank) {
+ dev_err(dev, "%s: Missing parameters\n", "delete_device");
+ return -EINVAL;
+ }
+ if (blank - buf > PECI_NAME_SIZE - 1) {
+ dev_err(dev, "%s: Invalid device type\n", "delete_device");
+ return -EINVAL;
+ }
+ memcpy(info.type, buf, blank - buf);
+
+ /* Parse remaining parameters, reject extra parameters */
+ ret = sscanf(++blank, "%hi%c", &addr, &end);
+ if (ret < 1) {
+ dev_err(dev, "%s: Can't parse client address\n",
+ "delete_device");
+ return -EINVAL;
+ }
+ if (ret > 1 && end != '\n') {
+ dev_err(dev, "%s: Extra parameters\n", "delete_device");
+ return -EINVAL;
+ }
+
+ info.addr = (u8)addr;
+
+ /* Make sure the device was added through sysfs */
+ ret = -ENOENT;
+ mutex_lock(&adapter->userspace_clients_lock);
+ list_for_each_entry_safe(client, next, &adapter->userspace_clients,
+ detected) {
+ if (client->addr == info.addr &&
+ !strncmp(client->name, info.type, PECI_NAME_SIZE)) {
+ dev_dbg(dev, "%s: Deleting device %s at 0x%02hx\n",
+ "delete_device", client->name, client->addr);
+ list_del(&client->detected);
+ peci_unregister_device(client);
+ ret = count;
+ break;
+ }
+ }
+ mutex_unlock(&adapter->userspace_clients_lock);
+
+ if (ret < 0)
+ dev_dbg(dev, "%s: Can't find device in list\n",
+ "delete_device");
+
+ return ret;
+}
+static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, 0200, NULL,
+ peci_sysfs_delete_device);
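+
+/*
+ * Usage sketch (illustrative only, not part of this patch): the two
+ * attributes above take a "type address" pair, mirroring the i2c-core
+ * interface they are modeled on. The "cputemp" driver name is a
+ * hypothetical example; any registered PECI client driver name works:
+ *
+ *	echo cputemp 0x30 > /sys/bus/peci/devices/peci-0/new_device
+ *	echo cputemp 0x30 > /sys/bus/peci/devices/peci-0/delete_device
+ */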
+
+static struct attribute *peci_adapter_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_new_device.attr,
+ &dev_attr_delete_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(peci_adapter);
+
+struct device_type peci_adapter_type = {
+ .groups = peci_adapter_groups,
+ .release = peci_adapter_dev_release,
+};
+EXPORT_SYMBOL_GPL(peci_adapter_type);
+
+/**
+ * peci_verify_adapter - return parameter as peci_adapter, or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * Return: pointer to peci_adapter on success, else NULL.
+ */
+struct peci_adapter *peci_verify_adapter(struct device *dev)
+{
+ return (dev->type == &peci_adapter_type)
+ ? to_peci_adapter(dev)
+ : NULL;
+}
+EXPORT_SYMBOL_GPL(peci_verify_adapter);
+
+#if IS_ENABLED(CONFIG_OF)
+static struct peci_client *peci_of_register_device(struct peci_adapter *adapter,
+ struct device_node *node)
+{
+ struct peci_board_info info = {};
+ struct peci_client *client;
+ u32 addr;
+ int ret;
+
+ dev_dbg(&adapter->dev, "register %pOF\n", node);
+
+ ret = of_property_read_u32(node, "reg", &addr);
+ if (ret) {
+ dev_err(&adapter->dev, "invalid reg on %pOF\n", node);
+ return ERR_PTR(ret);
+ }
+
+ info.addr = addr;
+ info.of_node = node;
+
+ client = peci_new_device(adapter, &info);
+ if (!client)
+ client = ERR_PTR(-EINVAL);
+
+ return client;
+}
+
+static void peci_of_register_devices(struct peci_adapter *adapter)
+{
+ struct device_node *bus, *node;
+ struct peci_client *client;
+
+ /* Only register child devices if the adapter has a node pointer set */
+ if (!adapter->dev.of_node)
+ return;
+
+ bus = of_get_child_by_name(adapter->dev.of_node, "peci-bus");
+ if (!bus)
+ bus = of_node_get(adapter->dev.of_node);
+
+ for_each_available_child_of_node(bus, node) {
+ if (of_node_test_and_set_flag(node, OF_POPULATED))
+ continue;
+
+ client = peci_of_register_device(adapter, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adapter->dev,
+ "Failed to create PECI device for %pOF\n",
+ node);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
+ }
+
+ of_node_put(bus);
+}
+#else /* CONFIG_OF */
+static void peci_of_register_devices(struct peci_adapter *adapter) { }
+#endif /* CONFIG_OF */
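+
+/*
+ * Illustrative devicetree shape (a sketch; the client compatible string
+ * is an assumption from the wider patchset, not defined in this file):
+ *
+ *	peci-adapter {
+ *		peci-bus {
+ *			cpu@30 {
+ *				compatible = "intel,peci-client";
+ *				reg = <0x30>;
+ *			};
+ *		};
+ *	};
+ *
+ * Children may sit directly under the adapter node or inside an optional
+ * "peci-bus" child; each is registered at the address in its "reg" cell.
+ */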
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int peci_of_match_node(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/* must call put_device() when done with returned peci_client device */
+static struct peci_client *peci_of_find_device(struct device_node *node)
+{
+ struct peci_client *client;
+ struct device *dev;
+
+ dev = bus_find_device(&peci_bus_type, NULL, node, peci_of_match_node);
+ if (!dev)
+ return NULL;
+
+ client = peci_verify_client(dev);
+ if (!client)
+ put_device(dev);
+
+ return client;
+}
+
+/* must call put_device() when done with returned peci_adapter device */
+static struct peci_adapter *peci_of_find_adapter(struct device_node *node)
+{
+ struct peci_adapter *adapter;
+ struct device *dev;
+
+ dev = bus_find_device(&peci_bus_type, NULL, node, peci_of_match_node);
+ if (!dev)
+ return NULL;
+
+ adapter = peci_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
+
+ return adapter;
+}
+
+static int peci_of_notify(struct notifier_block *nb, ulong action, void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct peci_adapter *adapter;
+ struct peci_client *client;
+
+ switch (of_reconfig_get_state_change(action, rd)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ adapter = peci_of_find_adapter(rd->dn->parent);
+ if (!adapter)
+ return NOTIFY_OK; /* not for us */
+
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+ put_device(&adapter->dev);
+ return NOTIFY_OK;
+ }
+
+ client = peci_of_register_device(adapter, rd->dn);
+ put_device(&adapter->dev);
+
+ if (IS_ERR(client)) {
+ dev_err(&adapter->dev,
+ "failed to create client for '%pOF'\n", rd->dn);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
+ return notifier_from_errno(PTR_ERR(client));
+ }
+ break;
+ case OF_RECONFIG_CHANGE_REMOVE:
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
+ /* find our device by node */
+ client = peci_of_find_device(rd->dn);
+ if (!client)
+ return NOTIFY_OK; /* no? not meant for us */
+
+ /* unregister takes one ref away */
+ peci_unregister_device(client);
+
+ /* and put the reference of the find */
+ put_device(&client->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block peci_of_notifier = {
+ .notifier_call = peci_of_notify,
+};
+#else /* CONFIG_OF_DYNAMIC */
+extern struct notifier_block peci_of_notifier;
+#endif /* CONFIG_OF_DYNAMIC */
+
+/**
+ * peci_alloc_adapter - allocate a PECI adapter
+ * @dev: the parent device of the new adapter, possibly on the platform_bus
+ * @size: how much zeroed driver-private data to allocate; the pointer to this
+ * memory is in the driver_data field of the returned device,
+ * accessible with peci_get_adapdata().
+ * Context: can sleep
+ *
+ * This call is used only by PECI adapter drivers, which are the only ones
+ * directly touching chip registers. It's how they allocate a peci_adapter
+ * structure, prior to calling peci_add_adapter().
+ *
+ * This must be called from context that can sleep.
+ *
+ * The caller is responsible for initializing the adapter's methods before
+ * calling peci_add_adapter(); and (after errors while adding the device)
+ * calling put_device() to prevent a memory leak.
+ *
+ * Return: the peci_adapter structure on success, else NULL.
+ */
+struct peci_adapter *peci_alloc_adapter(struct device *dev, uint size)
+{
+ struct peci_adapter *adapter;
+
+ if (!dev)
+ return NULL;
+
+ adapter = kzalloc(size + sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ device_initialize(&adapter->dev);
+ adapter->dev.parent = dev;
+ adapter->dev.bus = &peci_bus_type;
+ adapter->dev.type = &peci_adapter_type;
+ peci_set_adapdata(adapter, &adapter[1]);
+
+ return adapter;
+}
+EXPORT_SYMBOL_GPL(peci_alloc_adapter);
+
+static int peci_register_adapter(struct peci_adapter *adapter)
+{
+ int ret = -EINVAL;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!is_registered))
+ goto err_free_idr;
+
+ if (WARN(!adapter->name[0], "peci adapter has no name"))
+ goto err_free_idr;
+
+ if (WARN(!adapter->xfer, "peci adapter has no xfer function\n"))
+ goto err_free_idr;
+
+ mutex_init(&adapter->bus_lock);
+ mutex_init(&adapter->userspace_clients_lock);
+ INIT_LIST_HEAD(&adapter->userspace_clients);
+
+ dev_set_name(&adapter->dev, "peci-%d", adapter->nr);
+
+ ret = device_add(&adapter->dev);
+ if (ret) {
+ pr_err("adapter '%s': can't add device (%d)\n",
+ adapter->name, ret);
+ goto err_free_idr;
+ }
+
+ dev_dbg(&adapter->dev, "adapter [%s] registered\n", adapter->name);
+
+ pm_runtime_no_callbacks(&adapter->dev);
+ pm_suspend_ignore_children(&adapter->dev, true);
+ pm_runtime_enable(&adapter->dev);
+
+ /* create pre-declared device nodes */
+ peci_of_register_devices(adapter);
+
+ return 0;
+
+err_free_idr:
+ mutex_lock(&core_lock);
+ idr_remove(&peci_adapter_idr, adapter->nr);
+ mutex_unlock(&core_lock);
+ return ret;
+}
+
+static int peci_add_numbered_adapter(struct peci_adapter *adapter)
+{
+ int id;
+
+ mutex_lock(&core_lock);
+ id = idr_alloc(&peci_adapter_idr, adapter,
+ adapter->nr, adapter->nr + 1, GFP_KERNEL);
+ mutex_unlock(&core_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+
+ return peci_register_adapter(adapter);
+}
+
+/**
+ * peci_add_adapter - add a PECI adapter
+ * @adapter: initialized adapter, originally from peci_alloc_adapter()
+ * Context: can sleep
+ *
+ * PECI adapters connect to their drivers using some non-PECI bus,
+ * such as the platform bus. The final stage of probe() in that code
+ * includes calling peci_add_adapter() to hook up to this PECI bus glue.
+ *
+ * This must be called from context that can sleep.
+ *
+ * It returns zero on success, else a negative error code (dropping the
+ * adapter's refcount). After a successful return, the caller is responsible
+ * for calling peci_del_adapter().
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_add_adapter(struct peci_adapter *adapter)
+{
+ struct device *dev = &adapter->dev;
+ int id;
+
+ id = of_alias_get_id(dev->of_node, "peci");
+ if (id >= 0) {
+ adapter->nr = id;
+ return peci_add_numbered_adapter(adapter);
+ }
+
+ mutex_lock(&core_lock);
+ id = idr_alloc(&peci_adapter_idr, adapter, 0, 0, GFP_KERNEL);
+ mutex_unlock(&core_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id;
+
+ adapter->nr = id;
+
+ return peci_register_adapter(adapter);
+}
+EXPORT_SYMBOL_GPL(peci_add_adapter);
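+
+/*
+ * Minimal adapter-driver sketch (illustrative; the "foo" names are
+ * hypothetical). A probe() pairs peci_alloc_adapter() with
+ * peci_add_adapter(), and drops the reference with put_device() if
+ * adding fails, as the kernel-doc above requires:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct peci_adapter *adapter;
+ *		int ret;
+ *
+ *		adapter = peci_alloc_adapter(&pdev->dev,
+ *					     sizeof(struct foo_priv));
+ *		if (!adapter)
+ *			return -ENOMEM;
+ *
+ *		strscpy(adapter->name, "foo-peci", sizeof(adapter->name));
+ *		adapter->xfer = foo_xfer;
+ *
+ *		ret = peci_add_adapter(adapter);
+ *		if (ret)
+ *			put_device(&adapter->dev);
+ *		return ret;
+ *	}
+ */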
+
+/**
+ * peci_del_adapter - delete a PECI adapter
+ * @adapter: the adapter being deleted
+ * Context: can sleep
+ *
+ * This call is used only by PECI adapter drivers, which are the only ones
+ * directly touching chip registers.
+ *
+ * This must be called from context that can sleep.
+ *
+ * Note that this function also drops a reference to the adapter.
+ */
+void peci_del_adapter(struct peci_adapter *adapter)
+{
+ struct peci_client *client, *next;
+ struct peci_adapter *found;
+ int nr;
+
+ /* First make sure that this adapter was ever added */
+ mutex_lock(&core_lock);
+ found = idr_find(&peci_adapter_idr, adapter->nr);
+ mutex_unlock(&core_lock);
+
+ if (found != adapter)
+ return;
+
+ /* Remove devices instantiated from sysfs */
+ mutex_lock(&adapter->userspace_clients_lock);
+ list_for_each_entry_safe(client, next, &adapter->userspace_clients,
+ detected) {
+ dev_dbg(&adapter->dev, "Removing %s at 0x%x\n", client->name,
+ client->addr);
+ list_del(&client->detected);
+ peci_unregister_device(client);
+ }
+ mutex_unlock(&adapter->userspace_clients_lock);
+
+ /*
+ * Detach any active clients. This can't fail, thus we do not
+ * check the returned value.
+ */
+ device_for_each_child(&adapter->dev, NULL, peci_unregister_client);
+
+ /* device name is gone after device_unregister */
+ dev_dbg(&adapter->dev, "adapter [%s] unregistered\n", adapter->name);
+
+ pm_runtime_disable(&adapter->dev);
+ nr = adapter->nr;
+ device_unregister(&adapter->dev);
+
+ /* free bus id */
+ mutex_lock(&core_lock);
+ idr_remove(&peci_adapter_idr, nr);
+ mutex_unlock(&core_lock);
+}
+EXPORT_SYMBOL_GPL(peci_del_adapter);
+
+int peci_for_each_dev(void *data, int (*fn)(struct device *, void *))
+{
+ int ret;
+
+ mutex_lock(&core_lock);
+ ret = bus_for_each_dev(&peci_bus_type, NULL, data, fn);
+ mutex_unlock(&core_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(peci_for_each_dev);
+
+/**
+ * peci_register_driver - register a PECI driver
+ * @owner: owner module of the driver being registered
+ * @driver: the driver being registered
+ * Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_register_driver(struct module *owner, struct peci_driver *driver)
+{
+ int ret;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!is_registered))
+ return -EAGAIN;
+
+ /* add the driver to the list of peci drivers in the driver core */
+ driver->driver.owner = owner;
+ driver->driver.bus = &peci_bus_type;
+
+ /*
+ * When registration returns, the driver core
+ * will have called probe() for all matching-but-unbound devices.
+ */
+ ret = driver_register(&driver->driver);
+ if (ret)
+ return ret;
+
+ pr_debug("driver [%s] registered\n", driver->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(peci_register_driver);
+
+/**
+ * peci_del_driver - unregister a PECI driver
+ * @driver: the driver being unregistered
+ * Context: can sleep
+ */
+void peci_del_driver(struct peci_driver *driver)
+{
+ driver_unregister(&driver->driver);
+ pr_debug("driver [%s] unregistered\n", driver->driver.name);
+}
+EXPORT_SYMBOL_GPL(peci_del_driver);
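+
+/*
+ * Client-driver sketch (illustrative; the "foo" names are hypothetical,
+ * and the probe/remove callbacks are assumed to be declared in
+ * <linux/peci.h>, as on comparable bus cores):
+ *
+ *	static struct peci_driver foo_driver = {
+ *		.probe  = foo_probe,
+ *		.remove = foo_remove,
+ *		.driver = { .name = "foo" },
+ *	};
+ *
+ * Registration from module init is then
+ * peci_register_driver(THIS_MODULE, &foo_driver), matched by
+ * peci_del_driver(&foo_driver) on module exit.
+ */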
+
+static int __init peci_init(void)
+{
+ int ret;
+
+ ret = bus_register(&peci_bus_type);
+ if (ret < 0) {
+ pr_err("peci: Failed to register PECI bus type!\n");
+ return ret;
+ }
+
+ crc8_populate_msb(peci_crc8_table, PECI_CRC8_POLYNOMIAL);
+
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_register(&peci_of_notifier));
+
+ is_registered = true;
+
+ return 0;
+}
+
+static void __exit peci_exit(void)
+{
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_unregister(&peci_of_notifier));
+
+ bus_unregister(&peci_bus_type);
+}
+
+subsys_initcall(peci_init);
+module_exit(peci_exit);
+
+MODULE_AUTHOR("Jason M Biils <jason.m.bills@linux.intel.com>");
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI bus core module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/peci/peci-dev.c b/drivers/peci/peci-dev.c
new file mode 100644
index 000000000000..e0fe09467a80
--- /dev/null
+++ b/drivers/peci/peci-dev.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/peci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/*
+ * A peci_dev represents a peci_adapter ... a PECI master, not a slave
+ * (peci_client) with which messages will be exchanged. It's coupled
+ * with a character special file which is accessed by user mode drivers.
+ *
+ * The list of peci_dev structures is parallel to the peci_adapter lists
+ * maintained by the driver model, and is updated using bus notifications.
+ */
+struct peci_dev {
+ struct list_head list;
+ struct peci_adapter *adapter;
+ struct device *dev;
+ struct cdev cdev;
+};
+
+#define PECI_MINORS MINORMASK
+
+static dev_t peci_devt;
+static LIST_HEAD(peci_dev_list);
+static DEFINE_SPINLOCK(peci_dev_list_lock);
+
+static struct peci_dev *peci_dev_get_by_minor(uint index)
+{
+ struct peci_dev *peci_dev;
+
+ spin_lock(&peci_dev_list_lock);
+ list_for_each_entry(peci_dev, &peci_dev_list, list) {
+ if (peci_dev->adapter->nr == index)
+ goto found;
+ }
+ peci_dev = NULL;
+found:
+ spin_unlock(&peci_dev_list_lock);
+
+ return peci_dev;
+}
+
+static struct peci_dev *peci_dev_alloc(struct peci_adapter *adapter)
+{
+ struct peci_dev *peci_dev;
+
+ if (adapter->nr >= PECI_MINORS) {
+ dev_err(&adapter->dev, "Out of device minors (%d)\n",
+ adapter->nr);
+ return ERR_PTR(-ENODEV);
+ }
+
+ peci_dev = kzalloc(sizeof(*peci_dev), GFP_KERNEL);
+ if (!peci_dev)
+ return ERR_PTR(-ENOMEM);
+ peci_dev->adapter = adapter;
+
+ spin_lock(&peci_dev_list_lock);
+ list_add_tail(&peci_dev->list, &peci_dev_list);
+ spin_unlock(&peci_dev_list_lock);
+
+ return peci_dev;
+}
+
+static void peci_dev_put(struct peci_dev *peci_dev)
+{
+ spin_lock(&peci_dev_list_lock);
+ list_del(&peci_dev->list);
+ spin_unlock(&peci_dev_list_lock);
+ kfree(peci_dev);
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct peci_dev *peci_dev = peci_dev_get_by_minor(MINOR(dev->devt));
+
+ if (!peci_dev)
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", peci_dev->adapter->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *peci_dev_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(peci_dev);
+
+static long peci_dev_ioctl(struct file *file, uint iocmd, ulong arg)
+{
+ struct peci_dev *peci_dev = file->private_data;
+ void __user *umsg = (void __user *)arg;
+ struct peci_xfer_msg *xmsg = NULL;
+ struct peci_xfer_msg uxmsg;
+ enum peci_cmd cmd;
+ u8 *msg = NULL;
+ uint msg_len;
+ int ret;
+
+ cmd = _IOC_NR(iocmd);
+ msg_len = _IOC_SIZE(iocmd);
+
+ switch (cmd) {
+ case PECI_CMD_XFER:
+ if (msg_len != sizeof(struct peci_xfer_msg)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&uxmsg, umsg, msg_len)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ xmsg = peci_get_xfer_msg(uxmsg.tx_len, uxmsg.rx_len);
+ if (IS_ERR(xmsg)) {
+ ret = PTR_ERR(xmsg);
+ break;
+ }
+
+ if (uxmsg.tx_len &&
+ copy_from_user(xmsg->tx_buf, (__u8 __user *)uxmsg.tx_buf,
+ uxmsg.tx_len)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ xmsg->addr = uxmsg.addr;
+ xmsg->tx_len = uxmsg.tx_len;
+ xmsg->rx_len = uxmsg.rx_len;
+
+ ret = peci_command(peci_dev->adapter, cmd, xmsg);
+ if (!ret && xmsg->rx_len &&
+ copy_to_user((__u8 __user *)uxmsg.rx_buf, xmsg->rx_buf,
+ xmsg->rx_len))
+ ret = -EFAULT;
+
+ break;
+
+ default:
+ msg = memdup_user(umsg, msg_len);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
+ break;
+ }
+
+ ret = peci_command(peci_dev->adapter, cmd, msg);
+ if ((!ret || ret == -ETIMEDOUT) &&
+ copy_to_user(umsg, msg, msg_len))
+ ret = -EFAULT;
+
+ break;
+ }
+
+ peci_put_xfer_msg(xmsg);
+ kfree(msg);
+
+ return (long)ret;
+}
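+
+/*
+ * Userspace sketch (illustrative; PECI_IOC_XFER is assumed to be the
+ * uapi ioctl macro whose _IOC_NR is PECI_CMD_XFER and whose _IOC_SIZE
+ * is sizeof(struct peci_xfer_msg)). A raw transfer fills the message
+ * and issues the ioctl on the adapter's character device:
+ *
+ *	struct peci_xfer_msg msg = {
+ *		.addr = 0x30, .tx_len = 1, .rx_len = 2,
+ *		.tx_buf = tx, .rx_buf = rx,
+ *	};
+ *	int fd = open("/dev/peci-0", O_RDWR);
+ *
+ *	if (fd >= 0 && !ioctl(fd, PECI_IOC_XFER, &msg))
+ *		// rx now holds the rx_len response bytes
+ */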
+
+static int peci_dev_open(struct inode *inode, struct file *file)
+{
+ struct peci_adapter *adapter;
+ struct peci_dev *peci_dev;
+
+ peci_dev = peci_dev_get_by_minor(iminor(inode));
+ if (!peci_dev)
+ return -ENODEV;
+
+ adapter = peci_get_adapter(peci_dev->adapter->nr);
+ if (!adapter)
+ return -ENODEV;
+
+ file->private_data = peci_dev;
+
+ return 0;
+}
+
+static int peci_dev_release(struct inode *inode, struct file *file)
+{
+ struct peci_dev *peci_dev = file->private_data;
+
+ peci_put_adapter(peci_dev->adapter);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations peci_dev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = peci_dev_ioctl,
+ .open = peci_dev_open,
+ .release = peci_dev_release,
+ .llseek = no_llseek,
+};
+
+static struct class *peci_dev_class;
+
+static int peci_dev_attach_adapter(struct device *dev, void *dummy)
+{
+ struct peci_adapter *adapter;
+ struct peci_dev *peci_dev;
+ dev_t devt;
+ int ret;
+
+ if (dev->type != &peci_adapter_type)
+ return 0;
+
+ adapter = to_peci_adapter(dev);
+ peci_dev = peci_dev_alloc(adapter);
+ if (IS_ERR(peci_dev))
+ return PTR_ERR(peci_dev);
+
+ cdev_init(&peci_dev->cdev, &peci_dev_fops);
+ peci_dev->cdev.owner = THIS_MODULE;
+ devt = MKDEV(MAJOR(peci_devt), adapter->nr);
+
+ ret = cdev_add(&peci_dev->cdev, devt, 1);
+ if (ret)
+ goto err_put_dev;
+
+ /* register this peci device with the driver core */
+ peci_dev->dev = device_create(peci_dev_class, &adapter->dev, devt, NULL,
+ "peci-%d", adapter->nr);
+ if (IS_ERR(peci_dev->dev)) {
+ ret = PTR_ERR(peci_dev->dev);
+ goto err_del_cdev;
+ }
+
+ dev_info(dev, "cdev of adapter [%s] registered as minor %d\n",
+ adapter->name, adapter->nr);
+
+ return 0;
+
+err_del_cdev:
+ cdev_del(&peci_dev->cdev);
+err_put_dev:
+ peci_dev_put(peci_dev);
+
+ return ret;
+}
+
+static int peci_dev_detach_adapter(struct device *dev, void *dummy)
+{
+ struct peci_adapter *adapter;
+ struct peci_dev *peci_dev;
+ dev_t devt;
+
+ if (dev->type != &peci_adapter_type)
+ return 0;
+
+ adapter = to_peci_adapter(dev);
+ peci_dev = peci_dev_get_by_minor(adapter->nr);
+ if (!peci_dev)
+ return 0;
+
+ cdev_del(&peci_dev->cdev);
+ devt = peci_dev->dev->devt;
+ peci_dev_put(peci_dev);
+ device_destroy(peci_dev_class, devt);
+
+ dev_info(dev, "cdev of adapter [%s] unregistered\n", adapter->name);
+
+ return 0;
+}
+
+static int peci_dev_notifier_call(struct notifier_block *nb, ulong action,
+ void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ return peci_dev_attach_adapter(dev, NULL);
+ case BUS_NOTIFY_DEL_DEVICE:
+ return peci_dev_detach_adapter(dev, NULL);
+ }
+
+ return 0;
+}
+
+static struct notifier_block peci_dev_notifier = {
+ .notifier_call = peci_dev_notifier_call,
+};
+
+static int __init peci_dev_init(void)
+{
+ int ret;
+
+ pr_debug("peci /dev entries driver\n");
+
+ ret = alloc_chrdev_region(&peci_devt, 0, PECI_MINORS, "peci");
+ if (ret < 0) {
+ pr_err("peci: Failed to allocate chr dev region!\n");
+ bus_unregister(&peci_bus_type);
+ goto err;
+ }
+
+ peci_dev_class = class_create(THIS_MODULE, KBUILD_MODNAME);
+ if (IS_ERR(peci_dev_class)) {
+ ret = PTR_ERR(peci_dev_class);
+ goto err_unreg_chrdev;
+ }
+ peci_dev_class->dev_groups = peci_dev_groups;
+
+ /* Keep track of adapters which will be added or removed later */
+ ret = bus_register_notifier(&peci_bus_type, &peci_dev_notifier);
+ if (ret)
+ goto err_destroy_class;
+
+ /* Bind to already existing adapters right away */
+ peci_for_each_dev(NULL, peci_dev_attach_adapter);
+
+ return 0;
+
+err_destroy_class:
+ class_destroy(peci_dev_class);
+err_unreg_chrdev:
+ unregister_chrdev_region(peci_devt, PECI_MINORS);
+err:
+ pr_err("%s: Driver Initialization failed\n", __FILE__);
+
+ return ret;
+}
+
+static void __exit peci_dev_exit(void)
+{
+ bus_unregister_notifier(&peci_bus_type, &peci_dev_notifier);
+ peci_for_each_dev(NULL, peci_dev_detach_adapter);
+ class_destroy(peci_dev_class);
+ unregister_chrdev_region(peci_devt, PECI_MINORS);
+}
+
+module_init(peci_dev_init);
+module_exit(peci_dev_exit);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI /dev entries driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index fa32c3e9c9d1..7efe6dbe4398 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -46,6 +46,7 @@
#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU694 0x694 /* Multi-function Pin Control #25 */
+#define SCU69C 0x69C /* Multi-function Pin Control #27 */
#define SCUC20 0xC20 /* PCIE configuration Setting Control */
#define ASPEED_G6_NR_PINS 256
@@ -819,11 +820,13 @@ FUNC_DECL_2(PWM14, PWM14G0, PWM14G1);
#define Y23 127
SIG_EXPR_LIST_DECL_SEMG(Y23, PWM15, PWM15G1, PWM15, SIG_DESC_SET(SCU41C, 31));
SIG_EXPR_LIST_DECL_SESG(Y23, THRUOUT3, THRU3, SIG_DESC_SET(SCU4BC, 31));
-PIN_DECL_2(Y23, GPIOP7, PWM15, THRUOUT3);
+SIG_EXPR_LIST_DECL_SESG(Y23, HEARTBEAT, HEARTBEAT, SIG_DESC_SET(SCU69C, 31));
+PIN_DECL_3(Y23, GPIOP7, PWM15, THRUOUT3, HEARTBEAT);
GROUP_DECL(PWM15G1, Y23);
FUNC_DECL_2(PWM15, PWM15G0, PWM15G1);
FUNC_GROUP_DECL(THRU3, AB24, Y23);
+FUNC_GROUP_DECL(HEARTBEAT, Y23);
#define AA25 128
SSSF_PIN_DECL(AA25, GPIOQ0, TACH0, SIG_DESC_SET(SCU430, 0));
@@ -1920,6 +1923,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
ASPEED_PINCTRL_GROUP(GPIU5),
ASPEED_PINCTRL_GROUP(GPIU6),
ASPEED_PINCTRL_GROUP(GPIU7),
+ ASPEED_PINCTRL_GROUP(HEARTBEAT),
ASPEED_PINCTRL_GROUP(HVI3C3),
ASPEED_PINCTRL_GROUP(HVI3C4),
ASPEED_PINCTRL_GROUP(I2C1),
@@ -2158,6 +2162,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
ASPEED_PINCTRL_FUNC(GPIU5),
ASPEED_PINCTRL_FUNC(GPIU6),
ASPEED_PINCTRL_FUNC(GPIU7),
+ ASPEED_PINCTRL_FUNC(HEARTBEAT),
ASPEED_PINCTRL_FUNC(I2C1),
ASPEED_PINCTRL_FUNC(I2C10),
ASPEED_PINCTRL_FUNC(I2C11),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index b625a657171e..53f3f8aec695 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -76,6 +76,9 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
{
int ret;
+ pr_debug("Enabling signal %s for %s\n", expr->signal,
+ expr->function);
+
ret = aspeed_sig_expr_eval(ctx, expr, true);
if (ret < 0)
return ret;
@@ -91,6 +94,9 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
{
int ret;
+ pr_debug("Disabling signal %s for %s\n", expr->signal,
+ expr->function);
+
ret = aspeed_sig_expr_eval(ctx, expr, true);
if (ret < 0)
return ret;
@@ -229,7 +235,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
const struct aspeed_sig_expr **funcs;
const struct aspeed_sig_expr ***prios;
- pr_debug("Muxing pin %d for %s\n", pin, pfunc->name);
+ pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name);
if (!pdesc)
return -EINVAL;
@@ -269,6 +275,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
ret = aspeed_sig_expr_enable(&pdata->pinmux, expr);
if (ret)
return ret;
+
+ pr_debug("Muxed pin %s as %s for %s\n", pdesc->name, expr->signal,
+ expr->function);
}
return 0;
@@ -317,6 +326,8 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
if (!prios)
return -ENXIO;
+ pr_debug("Muxing pin %s for GPIO\n", pdesc->name);
+
/* Disable any functions of higher priority than GPIO */
while ((funcs = *prios)) {
if (aspeed_gpio_in_exprs(funcs))
@@ -346,14 +357,22 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
* lowest-priority signal type. As such it has no associated
* expression.
*/
- if (!expr)
+ if (!expr) {
+ pr_debug("Muxed pin %s as GPIO\n", pdesc->name);
return 0;
+ }
/*
* If GPIO is not the lowest priority signal type, assume there is only
* one expression defined to enable the GPIO function
*/
- return aspeed_sig_expr_enable(&pdata->pinmux, expr);
+ ret = aspeed_sig_expr_enable(&pdata->pinmux, expr);
+ if (ret)
+ return ret;
+
+ pr_debug("Muxed pin %s as %s\n", pdesc->name, expr->signal);
+
+ return 0;
}
int aspeed_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 067e7e7b34f1..795c9063fe7b 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -125,6 +125,7 @@ static const struct of_device_id reset_simple_dt_ids[] = {
.data = &reset_simple_active_low },
{ .compatible = "aspeed,ast2400-lpc-reset" },
{ .compatible = "aspeed,ast2500-lpc-reset" },
+ { .compatible = "aspeed,ast2600-lpc-reset" },
{ .compatible = "bitmain,bm1880-reset",
.data = &reset_simple_active_low },
{ .compatible = "snps,dw-high-reset" },
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index 323e177aa74d..a4b3cac87ce2 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -5,6 +5,14 @@ config SOC_ASPEED
def_bool y
depends on ARCH_ASPEED || COMPILE_TEST
+config ASPEED_BMC_MISC
+ bool "Miscellaneous ASPEED BMC interfaces"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ default ARCH_ASPEED
+ help
  Say yes to expose VGA and LPC scratch registers, and other
  miscellaneous control interfaces specific to the ASPEED BMC SoCs.
+
config ASPEED_LPC_CTRL
depends on SOC_ASPEED && REGMAP && MFD_SYSCON
tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
@@ -29,4 +37,12 @@ config ASPEED_P2A_CTRL
ioctl()s, the driver also provides an interface for userspace mappings to
a pre-defined region.
+config ASPEED_XDMA
+ tristate "Aspeed XDMA Engine Driver"
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON && HAS_DMA
+ help
  Enable support for the Aspeed XDMA Engine found on the Aspeed AST2XXX
  SoCs. The XDMA engine can perform automatic PCI DMA operations
  between the AST2XXX (acting as a BMC) and a host processor.
+
endmenu
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index b64be47f2b1f..217d876fec25 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ASPEED_BMC_MISC) += aspeed-bmc-misc.o
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+obj-$(CONFIG_ASPEED_XDMA) += aspeed-xdma.o
diff --git a/drivers/soc/aspeed/aspeed-bmc-misc.c b/drivers/soc/aspeed/aspeed-bmc-misc.c
new file mode 100644
index 000000000000..314007bad74f
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-bmc-misc.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corp.
+
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define DEVICE_NAME "aspeed-bmc-misc"
+
+struct aspeed_bmc_ctrl {
+ const char *name;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ struct regmap *map;
+ struct kobj_attribute attr;
+};
+
+struct aspeed_bmc_misc {
+ struct device *dev;
+ struct regmap *map;
+ struct aspeed_bmc_ctrl *ctrls;
+ int nr_ctrls;
+};
+
+static int aspeed_bmc_misc_parse_dt_child(struct device_node *child,
+ struct aspeed_bmc_ctrl *ctrl)
+{
+ int rc;
+
+ /* Example child:
+ *
+ * ilpc2ahb {
+ * offset = <0x80>;
+ * bit-mask = <0x1>;
+ * bit-shift = <6>;
+ * label = "foo";
+ * }
+ */
+ if (of_property_read_string(child, "label", &ctrl->name))
+ ctrl->name = child->name;
+
+ rc = of_property_read_u32(child, "offset", &ctrl->offset);
+ if (rc < 0)
+ return rc;
+
+ rc = of_property_read_u32(child, "bit-mask", &ctrl->mask);
+ if (rc < 0)
+ return rc;
+
+ rc = of_property_read_u32(child, "bit-shift", &ctrl->shift);
+ if (rc < 0)
+ return rc;
+
+ ctrl->mask <<= ctrl->shift;
+
+ return 0;
+}
+
+static int aspeed_bmc_misc_parse_dt(struct aspeed_bmc_misc *bmc,
+ struct device_node *parent)
+{
+ struct aspeed_bmc_ctrl *ctrl;
+ struct device_node *child;
+ int rc;
+
+ bmc->nr_ctrls = of_get_child_count(parent);
+ bmc->ctrls = devm_kcalloc(bmc->dev, bmc->nr_ctrls, sizeof(*bmc->ctrls),
+ GFP_KERNEL);
+ if (!bmc->ctrls)
+ return -ENOMEM;
+
+ ctrl = bmc->ctrls;
+ for_each_child_of_node(parent, child) {
+ rc = aspeed_bmc_misc_parse_dt_child(child, ctrl++);
+ if (rc < 0) {
+ of_node_put(child);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t aspeed_bmc_misc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct aspeed_bmc_ctrl *ctrl;
+ unsigned int val;
+ int rc;
+
+ ctrl = container_of(attr, struct aspeed_bmc_ctrl, attr);
+ rc = regmap_read(ctrl->map, ctrl->offset, &val);
+ if (rc)
+ return rc;
+
+ val &= ctrl->mask;
+ val >>= ctrl->shift;
+
+ return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t aspeed_bmc_misc_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_bmc_ctrl *ctrl;
+ long val;
+ int rc;
+
+ rc = kstrtol(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ ctrl = container_of(attr, struct aspeed_bmc_ctrl, attr);
+ val <<= ctrl->shift;
+ rc = regmap_update_bits(ctrl->map, ctrl->offset, ctrl->mask, val);
+
+ return rc < 0 ? rc : count;
+}
+
+static int aspeed_bmc_misc_add_sysfs_attr(struct aspeed_bmc_misc *bmc,
+ struct aspeed_bmc_ctrl *ctrl)
+{
+ ctrl->map = bmc->map;
+
+ sysfs_attr_init(&ctrl->attr.attr);
+ ctrl->attr.attr.name = ctrl->name;
+ ctrl->attr.attr.mode = 0664;
+ ctrl->attr.show = aspeed_bmc_misc_show;
+ ctrl->attr.store = aspeed_bmc_misc_store;
+
+ return sysfs_create_file(&bmc->dev->kobj, &ctrl->attr.attr);
+}
+
+static int aspeed_bmc_misc_populate_sysfs(struct aspeed_bmc_misc *bmc)
+{
+ int rc;
+ int i;
+
+ for (i = 0; i < bmc->nr_ctrls; i++) {
+ rc = aspeed_bmc_misc_add_sysfs_attr(bmc, &bmc->ctrls[i]);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
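+
+/*
+ * Usage sketch (illustrative): with the example "ilpc2ahb" child shown
+ * above (label "foo"), the field appears as a sysfs file on this
+ * platform device and reads or writes as a plain integer; the exact
+ * parent path depends on the board's devicetree:
+ *
+ *	cat /sys/bus/platform/devices/<parent>/foo
+ *	echo 1 > /sys/bus/platform/devices/<parent>/foo
+ */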
+
+static int aspeed_bmc_misc_probe(struct platform_device *pdev)
+{
+ struct aspeed_bmc_misc *bmc;
+ int rc;
+
+ bmc = devm_kzalloc(&pdev->dev, sizeof(*bmc), GFP_KERNEL);
+ if (!bmc)
+ return -ENOMEM;
+
+ bmc->dev = &pdev->dev;
+ bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(bmc->map))
+ return PTR_ERR(bmc->map);
+
+ rc = aspeed_bmc_misc_parse_dt(bmc, pdev->dev.of_node);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_bmc_misc_populate_sysfs(bmc);
+}
+
+static const struct of_device_id aspeed_bmc_misc_match[] = {
+ { .compatible = "aspeed,bmc-misc" },
+ { },
+};
+
+static struct platform_driver aspeed_bmc_misc = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_bmc_misc_match,
+ },
+ .probe = aspeed_bmc_misc_probe,
+};
+
+module_platform_driver(aspeed_bmc_misc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 01ed21e8bfee..ee2def4ffda3 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
@@ -21,6 +22,9 @@
#define HICR5_ENL2H BIT(8)
#define HICR5_ENFWH BIT(10)
+#define HICR6 0x4
+#define SW_FWH2AHB BIT(17)
+
#define HICR7 0x8
#define HICR8 0xc
@@ -32,6 +36,7 @@ struct aspeed_lpc_ctrl {
resource_size_t mem_size;
u32 pnor_size;
u32 pnor_base;
+ bool fwh2ahb;
};
static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file)
@@ -177,6 +182,16 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
return rc;
/*
+ * Switch to FWH2AHB mode, AST2600 only.
+ *
+ * The other bits in this register are interrupt status bits
+ * that are cleared by writing 1. As we don't want to clear
+ * them, set only the bit of interest.
+ */
+ if (lpc_ctrl->fwh2ahb)
+ regmap_write(lpc_ctrl->regmap, HICR6, SW_FWH2AHB);
+
+ /*
* Enable LPC FHW cycles. This is required for the host to
* access the regions specified.
*/
@@ -241,6 +256,18 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
lpc_ctrl->mem_size = resource_size(&resm);
lpc_ctrl->mem_base = resm.start;
+
+ if (!is_power_of_2(lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory size must be a power of 2, got %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(lpc_ctrl->mem_base, lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory must be naturally aligned for size %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
}
lpc_ctrl->regmap = syscon_node_to_regmap(
@@ -261,6 +288,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
return rc;
}
+ if (of_device_is_compatible(dev->of_node, "aspeed,ast2600-lpc-ctrl"))
+ lpc_ctrl->fwh2ahb = true;
+
lpc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
lpc_ctrl->miscdev.name = DEVICE_NAME;
lpc_ctrl->miscdev.fops = &aspeed_lpc_ctrl_fops;
@@ -291,6 +321,7 @@ static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
static const struct of_device_id aspeed_lpc_ctrl_match[] = {
{ .compatible = "aspeed,ast2400-lpc-ctrl" },
{ .compatible = "aspeed,ast2500-lpc-ctrl" },
+ { .compatible = "aspeed,ast2600-lpc-ctrl" },
{ },
};
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index f3d8d53ab84d..682ba0eb4eba 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -325,6 +325,8 @@ static const struct of_device_id aspeed_lpc_snoop_match[] = {
.data = &ast2400_model_data },
{ .compatible = "aspeed,ast2500-lpc-snoop",
.data = &ast2500_model_data },
+ { .compatible = "aspeed,ast2600-lpc-snoop",
+ .data = &ast2500_model_data },
{ },
};
diff --git a/drivers/soc/aspeed/aspeed-xdma.c b/drivers/soc/aspeed/aspeed-xdma.c
new file mode 100644
index 000000000000..4d8af9e64471
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-xdma.c
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright IBM Corp 2019
+
+#include <linux/aspeed-xdma.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define DEVICE_NAME "aspeed-xdma"
+
+#define SCU_AST2600_MISC_CTRL 0x0c0
+#define SCU_AST2600_MISC_CTRL_XDMA_BMC BIT(8)
+
+#define SCU_AST2500_PCIE_CONF 0x180
+#define SCU_AST2600_PCIE_CONF 0xc20
+#define SCU_PCIE_CONF_VGA_EN BIT(0)
+#define SCU_PCIE_CONF_VGA_EN_MMIO BIT(1)
+#define SCU_PCIE_CONF_VGA_EN_LPC BIT(2)
+#define SCU_PCIE_CONF_VGA_EN_MSI BIT(3)
+#define SCU_PCIE_CONF_VGA_EN_MCTP BIT(4)
+#define SCU_PCIE_CONF_VGA_EN_IRQ BIT(5)
+#define SCU_PCIE_CONF_VGA_EN_DMA BIT(6)
+#define SCU_PCIE_CONF_BMC_EN BIT(8)
+#define SCU_PCIE_CONF_BMC_EN_MMIO BIT(9)
+#define SCU_PCIE_CONF_BMC_EN_MSI BIT(11)
+#define SCU_PCIE_CONF_BMC_EN_MCTP BIT(12)
+#define SCU_PCIE_CONF_BMC_EN_IRQ BIT(13)
+#define SCU_PCIE_CONF_BMC_EN_DMA BIT(14)
+
+#define SCU_AST2500_BMC_CLASS_REV 0x19c
+#define SCU_AST2600_BMC_CLASS_REV 0xc68
+#define SCU_BMC_CLASS_REV_XDMA 0xff000001
+
+#define XDMA_CMDQ_SIZE PAGE_SIZE
+#define XDMA_NUM_CMDS \
+ (XDMA_CMDQ_SIZE / sizeof(struct aspeed_xdma_cmd))
+
+/* Aspeed specification requires 100us after disabling the reset */
+#define XDMA_ENGINE_SETUP_TIME_MAX_US 1000
+#define XDMA_ENGINE_SETUP_TIME_MIN_US 100
+
+#define XDMA_CMD_AST2500_PITCH_SHIFT 3
+#define XDMA_CMD_AST2500_PITCH_BMC GENMASK_ULL(62, 51)
+#define XDMA_CMD_AST2500_PITCH_HOST GENMASK_ULL(46, 35)
+#define XDMA_CMD_AST2500_PITCH_UPSTREAM BIT_ULL(31)
+#define XDMA_CMD_AST2500_PITCH_ADDR GENMASK_ULL(29, 4)
+#define XDMA_CMD_AST2500_PITCH_ID BIT_ULL(0)
+#define XDMA_CMD_AST2500_CMD_IRQ_EN BIT_ULL(31)
+#define XDMA_CMD_AST2500_CMD_LINE_NO GENMASK_ULL(27, 16)
+#define XDMA_CMD_AST2500_CMD_IRQ_BMC BIT_ULL(15)
+#define XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT 4
+#define XDMA_CMD_AST2500_CMD_LINE_SIZE \
+ GENMASK_ULL(14, XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT)
+#define XDMA_CMD_AST2500_CMD_ID BIT_ULL(1)
+
+#define XDMA_CMD_AST2600_PITCH_BMC GENMASK_ULL(62, 48)
+#define XDMA_CMD_AST2600_PITCH_HOST GENMASK_ULL(46, 32)
+#define XDMA_CMD_AST2600_PITCH_ADDR GENMASK_ULL(30, 0)
+#define XDMA_CMD_AST2600_CMD_64_EN BIT_ULL(40)
+#define XDMA_CMD_AST2600_CMD_IRQ_BMC BIT_ULL(37)
+#define XDMA_CMD_AST2600_CMD_IRQ_HOST BIT_ULL(36)
+#define XDMA_CMD_AST2600_CMD_UPSTREAM BIT_ULL(32)
+#define XDMA_CMD_AST2600_CMD_LINE_NO GENMASK_ULL(27, 16)
+#define XDMA_CMD_AST2600_CMD_LINE_SIZE GENMASK_ULL(14, 0)
+#define XDMA_CMD_AST2600_CMD_MULTILINE_SIZE GENMASK_ULL(14, 12)
+
+#define XDMA_AST2500_QUEUE_ENTRY_SIZE 4
+#define XDMA_AST2500_HOST_CMDQ_ADDR0 0x00
+#define XDMA_AST2500_HOST_CMDQ_ENDP 0x04
+#define XDMA_AST2500_HOST_CMDQ_WRITEP 0x08
+#define XDMA_AST2500_HOST_CMDQ_READP 0x0c
+#define XDMA_AST2500_BMC_CMDQ_ADDR 0x10
+#define XDMA_AST2500_BMC_CMDQ_ENDP 0x14
+#define XDMA_AST2500_BMC_CMDQ_WRITEP 0x18
+#define XDMA_AST2500_BMC_CMDQ_READP 0x1c
+#define XDMA_BMC_CMDQ_READP_RESET 0xee882266
+#define XDMA_AST2500_CTRL 0x20
+#define XDMA_AST2500_CTRL_US_COMP BIT(4)
+#define XDMA_AST2500_CTRL_DS_COMP BIT(5)
+#define XDMA_AST2500_CTRL_DS_DIRTY BIT(6)
+#define XDMA_AST2500_CTRL_DS_SIZE_256 BIT(17)
+#define XDMA_AST2500_CTRL_DS_TIMEOUT BIT(28)
+#define XDMA_AST2500_CTRL_DS_CHECK_ID BIT(29)
+#define XDMA_AST2500_STATUS 0x24
+#define XDMA_AST2500_STATUS_US_COMP BIT(4)
+#define XDMA_AST2500_STATUS_DS_COMP BIT(5)
+#define XDMA_AST2500_STATUS_DS_DIRTY BIT(6)
+#define XDMA_AST2500_INPRG_DS_CMD1 0x38
+#define XDMA_AST2500_INPRG_DS_CMD2 0x3c
+#define XDMA_AST2500_INPRG_US_CMD00 0x40
+#define XDMA_AST2500_INPRG_US_CMD01 0x44
+#define XDMA_AST2500_INPRG_US_CMD10 0x48
+#define XDMA_AST2500_INPRG_US_CMD11 0x4c
+#define XDMA_AST2500_INPRG_US_CMD20 0x50
+#define XDMA_AST2500_INPRG_US_CMD21 0x54
+#define XDMA_AST2500_HOST_CMDQ_ADDR1 0x60
+#define XDMA_AST2500_VGA_CMDQ_ADDR0 0x64
+#define XDMA_AST2500_VGA_CMDQ_ENDP 0x68
+#define XDMA_AST2500_VGA_CMDQ_WRITEP 0x6c
+#define XDMA_AST2500_VGA_CMDQ_READP 0x70
+#define XDMA_AST2500_VGA_CMD_STATUS 0x74
+#define XDMA_AST2500_VGA_CMDQ_ADDR1 0x78
+
+#define XDMA_AST2600_QUEUE_ENTRY_SIZE 2
+#define XDMA_AST2600_HOST_CMDQ_ADDR0 0x00
+#define XDMA_AST2600_HOST_CMDQ_ADDR1 0x04
+#define XDMA_AST2600_HOST_CMDQ_ENDP 0x08
+#define XDMA_AST2600_HOST_CMDQ_WRITEP 0x0c
+#define XDMA_AST2600_HOST_CMDQ_READP 0x10
+#define XDMA_AST2600_BMC_CMDQ_ADDR 0x14
+#define XDMA_AST2600_BMC_CMDQ_ENDP 0x18
+#define XDMA_AST2600_BMC_CMDQ_WRITEP 0x1c
+#define XDMA_AST2600_BMC_CMDQ_READP 0x20
+#define XDMA_AST2600_VGA_CMDQ_ADDR0 0x24
+#define XDMA_AST2600_VGA_CMDQ_ADDR1 0x28
+#define XDMA_AST2600_VGA_CMDQ_ENDP 0x2c
+#define XDMA_AST2600_VGA_CMDQ_WRITEP 0x30
+#define XDMA_AST2600_VGA_CMDQ_READP 0x34
+#define XDMA_AST2600_CTRL 0x38
+#define XDMA_AST2600_CTRL_US_COMP BIT(16)
+#define XDMA_AST2600_CTRL_DS_COMP BIT(17)
+#define XDMA_AST2600_CTRL_DS_DIRTY BIT(18)
+#define XDMA_AST2600_CTRL_DS_SIZE_256 BIT(20)
+#define XDMA_AST2600_STATUS 0x3c
+#define XDMA_AST2600_STATUS_US_COMP BIT(16)
+#define XDMA_AST2600_STATUS_DS_COMP BIT(17)
+#define XDMA_AST2600_STATUS_DS_DIRTY BIT(18)
+#define XDMA_AST2600_INPRG_DS_CMD00 0x40
+#define XDMA_AST2600_INPRG_DS_CMD01 0x44
+#define XDMA_AST2600_INPRG_DS_CMD10 0x48
+#define XDMA_AST2600_INPRG_DS_CMD11 0x4c
+#define XDMA_AST2600_INPRG_DS_CMD20 0x50
+#define XDMA_AST2600_INPRG_DS_CMD21 0x54
+#define XDMA_AST2600_INPRG_US_CMD00 0x60
+#define XDMA_AST2600_INPRG_US_CMD01 0x64
+#define XDMA_AST2600_INPRG_US_CMD10 0x68
+#define XDMA_AST2600_INPRG_US_CMD11 0x6c
+#define XDMA_AST2600_INPRG_US_CMD20 0x70
+#define XDMA_AST2600_INPRG_US_CMD21 0x74
+
+struct aspeed_xdma_cmd {
+ u64 host_addr;
+ u64 pitch;
+ u64 cmd;
+ u64 reserved;
+};
+
+struct aspeed_xdma_regs {
+ u8 bmc_cmdq_addr;
+ u8 bmc_cmdq_endp;
+ u8 bmc_cmdq_writep;
+ u8 bmc_cmdq_readp;
+ u8 control;
+ u8 status;
+};
+
+struct aspeed_xdma_status_bits {
+ u32 us_comp;
+ u32 ds_comp;
+ u32 ds_dirty;
+};
+
+struct aspeed_xdma;
+
+struct aspeed_xdma_chip {
+ u32 control;
+ u32 scu_bmc_class;
+ u32 scu_misc_ctrl;
+ u32 scu_pcie_conf;
+ unsigned int queue_entry_size;
+ struct aspeed_xdma_regs regs;
+ struct aspeed_xdma_status_bits status_bits;
+ unsigned int (*set_cmd)(struct aspeed_xdma *ctx,
+ struct aspeed_xdma_cmd cmds[2],
+ struct aspeed_xdma_op *op, u32 bmc_addr);
+};
+
+struct aspeed_xdma_client;
+
+struct aspeed_xdma {
+ struct kobject kobj;
+ const struct aspeed_xdma_chip *chip;
+
+ int irq;
+ int pcie_irq;
+ struct clk *clock;
+ struct device *dev;
+ void __iomem *base;
+ resource_size_t res_size;
+ resource_size_t res_start;
+ struct reset_control *reset;
+ struct reset_control *reset_rc;
+
+ /* Protects current_client */
+ spinlock_t client_lock;
+ struct aspeed_xdma_client *current_client;
+
+ /* Protects engine configuration */
+ spinlock_t engine_lock;
+ struct aspeed_xdma_cmd *cmdq;
+ unsigned int cmd_idx;
+ bool in_reset;
+ bool upstream;
+
+ /* Queue waiters for idle engine */
+ wait_queue_head_t wait;
+
+ struct work_struct reset_work;
+
+ u32 mem_phys;
+ u32 mem_size;
+ void *mem_virt;
+ dma_addr_t mem_coherent;
+ dma_addr_t cmdq_phys;
+ struct gen_pool *pool;
+
+ struct miscdevice misc;
+};
+
+struct aspeed_xdma_client {
+ struct aspeed_xdma *ctx;
+
+ bool error;
+ bool in_progress;
+ void *virt;
+ dma_addr_t phys;
+ u32 size;
+};
+
+static u32 aspeed_xdma_readl(struct aspeed_xdma *ctx, u8 reg)
+{
+ u32 v = readl(ctx->base + reg);
+
+ dev_dbg(ctx->dev, "read %02x[%08x]\n", reg, v);
+ return v;
+}
+
+static void aspeed_xdma_writel(struct aspeed_xdma *ctx, u8 reg, u32 val)
+{
+ writel(val, ctx->base + reg);
+ dev_dbg(ctx->dev, "write %02x[%08x]\n", reg, val);
+}
+
+static void aspeed_xdma_init_eng(struct aspeed_xdma *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->engine_lock, flags);
+ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_endp,
+ ctx->chip->queue_entry_size * XDMA_NUM_CMDS);
+ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_readp,
+ XDMA_BMC_CMDQ_READP_RESET);
+ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep, 0);
+ aspeed_xdma_writel(ctx, ctx->chip->regs.control, ctx->chip->control);
+ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_addr, ctx->cmdq_phys);
+
+ ctx->cmd_idx = 0;
+ spin_unlock_irqrestore(&ctx->engine_lock, flags);
+}
+
+static unsigned int aspeed_xdma_ast2500_set_cmd(struct aspeed_xdma *ctx,
+ struct aspeed_xdma_cmd cmds[2],
+ struct aspeed_xdma_op *op,
+ u32 bmc_addr)
+{
+ unsigned int rc = 1;
+ unsigned int pitch = 1;
+ unsigned int line_no = 1;
+ unsigned int line_size = op->len >>
+ XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
+ u64 cmd = XDMA_CMD_AST2500_CMD_IRQ_EN | XDMA_CMD_AST2500_CMD_IRQ_BMC |
+ XDMA_CMD_AST2500_CMD_ID;
+ u64 cmd_pitch = (op->direction ? XDMA_CMD_AST2500_PITCH_UPSTREAM : 0) |
+ XDMA_CMD_AST2500_PITCH_ID;
+
+ dev_dbg(ctx->dev, "xdma %s ast2500: bmc[%08x] len[%08x] host[%08x]\n",
+ op->direction ? "upstream" : "downstream", bmc_addr, op->len,
+ (u32)op->host_addr);
+
+ if (op->len > XDMA_CMD_AST2500_CMD_LINE_SIZE) {
+ unsigned int rem;
+ unsigned int total;
+
+ line_no = op->len / XDMA_CMD_AST2500_CMD_LINE_SIZE;
+ total = XDMA_CMD_AST2500_CMD_LINE_SIZE * line_no;
+ rem = (op->len - total) >>
+ XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
+ line_size = XDMA_CMD_AST2500_CMD_LINE_SIZE;
+ pitch = line_size >> XDMA_CMD_AST2500_PITCH_SHIFT;
+ line_size >>= XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
+
+ if (rem) {
+ u32 rbmc = bmc_addr + total;
+
+ cmds[1].host_addr = op->host_addr + (u64)total;
+ cmds[1].pitch = cmd_pitch |
+ ((u64)rbmc & XDMA_CMD_AST2500_PITCH_ADDR) |
+ FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, 1) |
+ FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, 1);
+ cmds[1].cmd = cmd |
+ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, 1) |
+ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE,
+ rem);
+ cmds[1].reserved = 0ULL;
+
+ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET,
+ 16, 1, &cmds[1], sizeof(*cmds),
+ true);
+
+ cmd &= ~(XDMA_CMD_AST2500_CMD_IRQ_EN |
+ XDMA_CMD_AST2500_CMD_IRQ_BMC);
+
+ rc++;
+ }
+ }
+
+ cmds[0].host_addr = op->host_addr;
+ cmds[0].pitch = cmd_pitch |
+ ((u64)bmc_addr & XDMA_CMD_AST2500_PITCH_ADDR) |
+ FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, pitch) |
+ FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, pitch);
+ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, line_no) |
+ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE, line_size);
+ cmds[0].reserved = 0ULL;
+
+ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds,
+ sizeof(*cmds), true);
+
+ return rc;
+}
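+
+/*
+ * Worked example (illustrative, assuming a 16-byte-aligned length): for
+ * a 0x10000-byte AST2500 transfer, line_no = 0x10000 / 0x7ff0 = 2 full
+ * lines (0xffe0 bytes total), so cmds[0] moves two 0x7ff0-byte lines
+ * and cmds[1] mops up the remaining 0x20 bytes. The remainder command,
+ * queued last, is the only one left with the BMC interrupt bits set, so
+ * a single completion interrupt fires once the whole transfer is done.
+ */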
+
+static unsigned int aspeed_xdma_ast2600_set_cmd(struct aspeed_xdma *ctx,
+ struct aspeed_xdma_cmd cmds[2],
+ struct aspeed_xdma_op *op,
+ u32 bmc_addr)
+{
+ unsigned int rc = 1;
+ unsigned int pitch = 1;
+ unsigned int line_no = 1;
+ unsigned int line_size = op->len;
+ u64 cmd = XDMA_CMD_AST2600_CMD_IRQ_BMC |
+ (op->direction ? XDMA_CMD_AST2600_CMD_UPSTREAM : 0);
+
+ if (op->host_addr & 0xffffffff00000000ULL ||
+ (op->host_addr + (u64)op->len) & 0xffffffff00000000ULL)
+ cmd |= XDMA_CMD_AST2600_CMD_64_EN;
+
+ dev_dbg(ctx->dev, "xdma %s ast2600: bmc[%08x] len[%08x] "
+ "host[%016llx]\n", op->direction ? "upstream" : "downstream",
+ bmc_addr, op->len, op->host_addr);
+
+ if (op->len > XDMA_CMD_AST2600_CMD_LINE_SIZE) {
+ unsigned int rem;
+ unsigned int total;
+
+ line_no = op->len / XDMA_CMD_AST2600_CMD_MULTILINE_SIZE;
+ total = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE * line_no;
+ rem = op->len - total;
+ line_size = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE;
+ pitch = line_size;
+
+ if (rem) {
+ u32 rbmc = bmc_addr + total;
+
+ cmds[1].host_addr = op->host_addr + (u64)total;
+ cmds[1].pitch =
+ ((u64)rbmc & XDMA_CMD_AST2600_PITCH_ADDR) |
+ FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, 1) |
+ FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, 1);
+ cmds[1].cmd = cmd |
+ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, 1) |
+ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE,
+ rem);
+ cmds[1].reserved = 0ULL;
+
+ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET,
+ 16, 1, &cmds[1], sizeof(*cmds),
+ true);
+
+ cmd &= ~XDMA_CMD_AST2600_CMD_IRQ_BMC;
+
+ rc++;
+ }
+ }
+
+ cmds[0].host_addr = op->host_addr;
+ cmds[0].pitch = ((u64)bmc_addr & XDMA_CMD_AST2600_PITCH_ADDR) |
+ FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, pitch) |
+ FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, pitch);
+ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, line_no) |
+ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE, line_size);
+ cmds[0].reserved = 0ULL;
+
+ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds,
+ sizeof(*cmds), true);
+
+ return rc;
+}
+
+static int aspeed_xdma_start(struct aspeed_xdma *ctx, unsigned int num_cmds,
+ struct aspeed_xdma_cmd cmds[2], bool upstream,
+ struct aspeed_xdma_client *client)
+{
+ unsigned int i;
+ int rc = -EBUSY;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->engine_lock, flags);
+ if (ctx->in_reset)
+ goto unlock;
+
+ spin_lock(&ctx->client_lock);
+ if (ctx->current_client) {
+ spin_unlock(&ctx->client_lock);
+ goto unlock;
+ }
+
+ client->error = false;
+ client->in_progress = true;
+ ctx->current_client = client;
+ spin_unlock(&ctx->client_lock);
+
+ ctx->upstream = upstream;
+ for (i = 0; i < num_cmds; ++i) {
+ /*
+ * Use memcpy_toio here to get some barriers before starting
+ * the operation. The command(s) need to be in physical memory
+ * before the XDMA engine starts.
+ */
+ memcpy_toio(&ctx->cmdq[ctx->cmd_idx], &cmds[i],
+ sizeof(struct aspeed_xdma_cmd));
+ ctx->cmd_idx = (ctx->cmd_idx + 1) % XDMA_NUM_CMDS;
+ }
+
+ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep,
+ ctx->cmd_idx * ctx->chip->queue_entry_size);
+ rc = 0;
+
+unlock:
+ spin_unlock_irqrestore(&ctx->engine_lock, flags);
+ return rc;
+}
+
+static void aspeed_xdma_done(struct aspeed_xdma *ctx, bool error)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->client_lock, flags);
+ if (ctx->current_client) {
+ ctx->current_client->error = error;
+ ctx->current_client->in_progress = false;
+ ctx->current_client = NULL;
+ }
+ spin_unlock_irqrestore(&ctx->client_lock, flags);
+
+ wake_up_interruptible_all(&ctx->wait);
+}
+
+static irqreturn_t aspeed_xdma_irq(int irq, void *arg)
+{
+ struct aspeed_xdma *ctx = arg;
+ u32 status;
+
+ spin_lock(&ctx->engine_lock);
+ status = aspeed_xdma_readl(ctx, ctx->chip->regs.status);
+
+ if (status & ctx->chip->status_bits.ds_dirty) {
+ aspeed_xdma_done(ctx, true);
+ } else {
+ if (status & ctx->chip->status_bits.us_comp) {
+ if (ctx->upstream)
+ aspeed_xdma_done(ctx, false);
+ }
+
+ if (status & ctx->chip->status_bits.ds_comp) {
+ if (!ctx->upstream)
+ aspeed_xdma_done(ctx, false);
+ }
+ }
+
+ aspeed_xdma_writel(ctx, ctx->chip->regs.status, status);
+ spin_unlock(&ctx->engine_lock);
+
+ return IRQ_HANDLED;
+}
+
+static void aspeed_xdma_reset(struct aspeed_xdma *ctx)
+{
+ unsigned long flags;
+
+ reset_control_assert(ctx->reset);
+ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
+ XDMA_ENGINE_SETUP_TIME_MAX_US);
+ reset_control_deassert(ctx->reset);
+ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
+ XDMA_ENGINE_SETUP_TIME_MAX_US);
+
+ aspeed_xdma_init_eng(ctx);
+
+ aspeed_xdma_done(ctx, true);
+
+ spin_lock_irqsave(&ctx->engine_lock, flags);
+ ctx->in_reset = false;
+ spin_unlock_irqrestore(&ctx->engine_lock, flags);
+
+ wake_up_interruptible(&ctx->wait);
+}
+
+static void aspeed_xdma_reset_work(struct work_struct *work)
+{
+ struct aspeed_xdma *ctx = container_of(work, struct aspeed_xdma,
+ reset_work);
+
+ aspeed_xdma_reset(ctx);
+}
+
+static irqreturn_t aspeed_xdma_pcie_irq(int irq, void *arg)
+{
+ struct aspeed_xdma *ctx = arg;
+
+ dev_dbg(ctx->dev, "PCI-E reset requested.\n");
+
+ spin_lock(&ctx->engine_lock);
+ if (ctx->in_reset) {
+ spin_unlock(&ctx->engine_lock);
+ return IRQ_HANDLED;
+ }
+
+ ctx->in_reset = true;
+ spin_unlock(&ctx->engine_lock);
+
+ schedule_work(&ctx->reset_work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t aspeed_xdma_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int rc;
+ unsigned int num_cmds;
+ struct aspeed_xdma_op op;
+ struct aspeed_xdma_cmd cmds[2];
+ struct aspeed_xdma_client *client = file->private_data;
+ struct aspeed_xdma *ctx = client->ctx;
+
+ if (len != sizeof(op))
+ return -EINVAL;
+
+ rc = copy_from_user(&op, buf, len);
+ if (rc)
+ return rc;
+
+ if (!op.len || op.len > client->size ||
+ op.direction > ASPEED_XDMA_DIRECTION_UPSTREAM)
+ return -EINVAL;
+
+ num_cmds = ctx->chip->set_cmd(ctx, cmds, &op, client->phys);
+ do {
+ rc = aspeed_xdma_start(ctx, num_cmds, cmds, !!op.direction,
+ client);
+ if (!rc)
+ break;
+
+ if ((file->f_flags & O_NONBLOCK) || rc != -EBUSY)
+ return rc;
+
+ rc = wait_event_interruptible(ctx->wait,
+ !(ctx->current_client ||
+ ctx->in_reset));
+ } while (!rc);
+
+ if (rc)
+ return -EINTR;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ rc = wait_event_interruptible(ctx->wait, !client->in_progress);
+ if (rc)
+ return -EINTR;
+
+ if (client->error)
+ return -EIO;
+ }
+
+ return len;
+}
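+
+/*
+ * Userspace sketch (illustrative; host_phys stands in for a host PCI
+ * address obtained elsewhere). A client mmap()s a buffer from the
+ * driver's pool, then writes a struct aspeed_xdma_op describing the
+ * transfer; a blocking fd returns only once the engine completes:
+ *
+ *	int fd = open("/dev/aspeed-xdma", O_RDWR);
+ *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, 0);
+ *	struct aspeed_xdma_op op = {
+ *		.host_addr = host_phys,
+ *		.len = len,
+ *		.direction = ASPEED_XDMA_DIRECTION_UPSTREAM,
+ *	};
+ *
+ *	if (write(fd, &op, sizeof(op)) == sizeof(op))
+ *		// upstream DMA of len bytes from buf has completed
+ */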
+
+static __poll_t aspeed_xdma_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ __poll_t mask = 0;
+ __poll_t req = poll_requested_events(wait);
+ struct aspeed_xdma_client *client = file->private_data;
+ struct aspeed_xdma *ctx = client->ctx;
+
+ if (req & (EPOLLIN | EPOLLRDNORM)) {
+ if (READ_ONCE(client->in_progress))
+ poll_wait(file, &ctx->wait, wait);
+
+ if (!READ_ONCE(client->in_progress)) {
+ if (READ_ONCE(client->error))
+ mask |= EPOLLERR;
+ else
+ mask |= EPOLLIN | EPOLLRDNORM;
+ }
+ }
+
+ if (req & (EPOLLOUT | EPOLLWRNORM)) {
+ if (READ_ONCE(ctx->current_client))
+ poll_wait(file, &ctx->wait, wait);
+
+ if (!READ_ONCE(ctx->current_client))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
+
+ return mask;
+}
+
+static long aspeed_xdma_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ unsigned long flags;
+ struct aspeed_xdma_client *client = file->private_data;
+ struct aspeed_xdma *ctx = client->ctx;
+
+ switch (cmd) {
+ case ASPEED_XDMA_IOCTL_RESET:
+ spin_lock_irqsave(&ctx->engine_lock, flags);
+ if (ctx->in_reset) {
+ spin_unlock_irqrestore(&ctx->engine_lock, flags);
+ return 0;
+ }
+
+ ctx->in_reset = true;
+ spin_unlock_irqrestore(&ctx->engine_lock, flags);
+
+ if (READ_ONCE(ctx->current_client))
+ dev_warn(ctx->dev,
+ "User reset with transfer in progress.\n");
+
+ aspeed_xdma_reset(ctx);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void aspeed_xdma_vma_close(struct vm_area_struct *vma)
+{
+ int rc;
+ struct aspeed_xdma_client *client = vma->vm_private_data;
+
+ rc = wait_event_interruptible(client->ctx->wait, !client->in_progress);
+ if (rc)
+ return;
+
+ gen_pool_free(client->ctx->pool, (unsigned long)client->virt,
+ client->size);
+
+ client->virt = NULL;
+ client->phys = 0;
+ client->size = 0;
+}
+
+static const struct vm_operations_struct aspeed_xdma_vm_ops = {
+ .close = aspeed_xdma_vma_close,
+};
+
+static int aspeed_xdma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int rc;
+ struct aspeed_xdma_client *client = file->private_data;
+ struct aspeed_xdma *ctx = client->ctx;
+
+ /* restrict file to one mapping */
+ if (client->size)
+ return -EBUSY;
+
+ client->size = vma->vm_end - vma->vm_start;
+ client->virt = gen_pool_dma_alloc(ctx->pool, client->size,
+ &client->phys);
+ if (!client->virt) {
+ client->phys = 0;
+ client->size = 0;
+ return -ENOMEM;
+ }
+
+ vma->vm_pgoff = (client->phys - ctx->mem_phys) >> PAGE_SHIFT;
+ vma->vm_ops = &aspeed_xdma_vm_ops;
+ vma->vm_private_data = client;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ rc = io_remap_pfn_range(vma, vma->vm_start, client->phys >> PAGE_SHIFT,
+ client->size, vma->vm_page_prot);
+ if (rc) {
+ dev_warn(ctx->dev, "mmap err: v[%08lx] to p[%08x], s[%08x]\n",
+ vma->vm_start, (u32)client->phys, client->size);
+
+ gen_pool_free(ctx->pool, (unsigned long)client->virt,
+ client->size);
+
+ client->virt = NULL;
+ client->phys = 0;
+ client->size = 0;
+ return rc;
+ }
+
+ dev_dbg(ctx->dev, "mmap: v[%08lx] to p[%08x], s[%08x]\n",
+ vma->vm_start, (u32)client->phys, client->size);
+
+ return 0;
+}
+
+static int aspeed_xdma_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *misc = file->private_data;
+ struct aspeed_xdma *ctx = container_of(misc, struct aspeed_xdma, misc);
+ struct aspeed_xdma_client *client = kzalloc(sizeof(*client),
+ GFP_KERNEL);
+
+ if (!client)
+ return -ENOMEM;
+
+ kobject_get(&ctx->kobj);
+ client->ctx = ctx;
+ file->private_data = client;
+ return 0;
+}
+
+static int aspeed_xdma_release(struct inode *inode, struct file *file)
+{
+ bool reset = false;
+ unsigned long flags;
+ struct aspeed_xdma_client *client = file->private_data;
+ struct aspeed_xdma *ctx = client->ctx;
+
+ spin_lock_irqsave(&ctx->client_lock, flags);
+ if (client == ctx->current_client) {
+ spin_lock(&ctx->engine_lock);
+ if (ctx->in_reset) {
+ ctx->current_client = NULL;
+ } else {
+ ctx->in_reset = true;
+ reset = true;
+ }
+ spin_unlock(&ctx->engine_lock);
+ }
+ spin_unlock_irqrestore(&ctx->client_lock, flags);
+
+ if (reset)
+ aspeed_xdma_reset(ctx);
+
+ if (client->virt)
+ gen_pool_free(ctx->pool, (unsigned long)client->virt,
+ client->size);
+
+ kfree(client);
+ kobject_put(&ctx->kobj);
+ return 0;
+}
+
+static const struct file_operations aspeed_xdma_fops = {
+ .owner = THIS_MODULE,
+ .write = aspeed_xdma_write,
+ .poll = aspeed_xdma_poll,
+ .unlocked_ioctl = aspeed_xdma_ioctl,
+ .mmap = aspeed_xdma_mmap,
+ .open = aspeed_xdma_open,
+ .release = aspeed_xdma_release,
+};
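+
+/*
+ * A minimal sketch of the userspace sequence this interface implies,
+ * assuming the command structure from the matching UAPI header (the field
+ * names below are illustrative, not authoritative):
+ *
+ *	int fd = open("/dev/aspeed-xdma", O_RDWR);
+ *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, 0);	// carves buf from the pool
+ *
+ *	struct aspeed_xdma_op op = {
+ *		.host_addr = host_phys,		// PCIe-side address
+ *		.len = len,
+ *		.upstream = 1,			// BMC -> host
+ *	};
+ *	write(fd, &op, sizeof(op));		// queues the transfer
+ *
+ *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
+ *	poll(&pfd, 1, -1);			// engine idle again
+ */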
+
+static int aspeed_xdma_init_scu(struct aspeed_xdma *ctx, struct device *dev)
+{
+ struct regmap *scu = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "aspeed,scu");
+
+ if (!IS_ERR(scu)) {
+ u32 selection;
+ bool pcie_device_bmc = true;
+ const u32 bmc = SCU_PCIE_CONF_BMC_EN |
+ SCU_PCIE_CONF_BMC_EN_MSI | SCU_PCIE_CONF_BMC_EN_IRQ |
+ SCU_PCIE_CONF_BMC_EN_DMA;
+ const u32 vga = SCU_PCIE_CONF_VGA_EN |
+ SCU_PCIE_CONF_VGA_EN_MSI | SCU_PCIE_CONF_VGA_EN_IRQ |
+ SCU_PCIE_CONF_VGA_EN_DMA;
+ const char *pcie = NULL;
+
+ if (!of_property_read_string(dev->of_node,
+ "aspeed,pcie-device", &pcie)) {
+ if (!strcmp(pcie, "vga")) {
+ pcie_device_bmc = false;
+ } else if (strcmp(pcie, "bmc")) {
+ dev_err(dev,
+ "Invalid pcie-device property %s.\n",
+ pcie);
+ return -EINVAL;
+ }
+ }
+
+ if (pcie_device_bmc) {
+ selection = bmc;
+ regmap_write(scu, ctx->chip->scu_bmc_class,
+ SCU_BMC_CLASS_REV_XDMA);
+ } else {
+ selection = vga;
+ }
+
+ regmap_update_bits(scu, ctx->chip->scu_pcie_conf, bmc | vga,
+ selection);
+
+ if (ctx->chip->scu_misc_ctrl)
+ regmap_update_bits(scu, ctx->chip->scu_misc_ctrl,
+ SCU_AST2600_MISC_CTRL_XDMA_BMC,
+ SCU_AST2600_MISC_CTRL_XDMA_BMC);
+ } else {
+ dev_warn(dev, "Unable to configure PCIe: %ld; continuing.\n",
+ PTR_ERR(scu));
+ }
+
+ return 0;
+}
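+
+/*
+ * aspeed_xdma_init_scu() consumes the "aspeed,scu" phandle and the
+ * optional "aspeed,pcie-device" string; probe additionally requires a
+ * "memory-region". A partial devicetree sketch (node name, labels and the
+ * unit address are illustrative only):
+ *
+ *	xdma@1e6e7000 {
+ *		compatible = "aspeed,ast2600-xdma";
+ *		aspeed,scu = <&syscon>;
+ *		aspeed,pcie-device = "bmc";	// or "vga"
+ *		memory-region = <&vga_memory>;
+ *	};
+ */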
+
+static void aspeed_xdma_kobject_release(struct kobject *kobj)
+{
+ struct aspeed_xdma *ctx = container_of(kobj, struct aspeed_xdma, kobj);
+
+ if (ctx->pcie_irq >= 0)
+ free_irq(ctx->pcie_irq, ctx);
+
+ gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE);
+
+ gen_pool_destroy(ctx->pool);
+
+ dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt,
+ ctx->mem_coherent);
+
+ if (ctx->reset_rc)
+ reset_control_put(ctx->reset_rc);
+ reset_control_put(ctx->reset);
+
+ clk_put(ctx->clock);
+
+ free_irq(ctx->irq, ctx);
+
+ iounmap(ctx->base);
+ release_mem_region(ctx->res_start, ctx->res_size);
+
+ kfree(ctx);
+}
+
+static struct kobj_type aspeed_xdma_kobject_type = {
+ .release = aspeed_xdma_kobject_release,
+};
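+
+/*
+ * All engine resources are torn down in the kobject release callback
+ * rather than in remove(): open() takes a reference and release() drops
+ * it, so the registers, IRQs and the DMA pool stay valid until the last
+ * file descriptor is closed, even if the driver is unbound first.
+ */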
+
+static int aspeed_xdma_iomap(struct aspeed_xdma *ctx,
+ struct platform_device *pdev)
+{
+ resource_size_t size;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res)
+		return -ENODEV;
+
+ size = resource_size(res);
+ if (!request_mem_region(res->start, size, dev_name(ctx->dev)))
+ return -ENOMEM;
+
+ ctx->base = ioremap(res->start, size);
+ if (!ctx->base) {
+ release_mem_region(res->start, size);
+ return -ENOMEM;
+ }
+
+ ctx->res_start = res->start;
+ ctx->res_size = size;
+
+ return 0;
+}
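+
+/*
+ * The raw request/ioremap helpers are used here instead of their devm_
+ * variants, presumably because teardown is deferred to the kobject
+ * release above rather than tied to device unbind.
+ */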
+
+static int aspeed_xdma_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct aspeed_xdma *ctx;
+ struct reserved_mem *mem;
+ struct device *dev = &pdev->dev;
+ struct device_node *memory_region;
+ const void *md = of_device_get_match_data(dev);
+
+ if (!md)
+ return -ENODEV;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->chip = md;
+ ctx->dev = dev;
+ platform_set_drvdata(pdev, ctx);
+ spin_lock_init(&ctx->client_lock);
+ spin_lock_init(&ctx->engine_lock);
+ INIT_WORK(&ctx->reset_work, aspeed_xdma_reset_work);
+ init_waitqueue_head(&ctx->wait);
+
+ rc = aspeed_xdma_iomap(ctx, pdev);
+ if (rc) {
+ dev_err(dev, "Failed to map registers.\n");
+ goto err_nomap;
+ }
+
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0) {
+ dev_err(dev, "Failed to find IRQ.\n");
+ rc = ctx->irq;
+ goto err_noirq;
+ }
+
+ rc = request_irq(ctx->irq, aspeed_xdma_irq, 0, DEVICE_NAME, ctx);
+ if (rc < 0) {
+ dev_err(dev, "Failed to request IRQ %d.\n", ctx->irq);
+ goto err_noirq;
+ }
+
+ ctx->clock = clk_get(dev, NULL);
+ if (IS_ERR(ctx->clock)) {
+ dev_err(dev, "Failed to request clock.\n");
+ rc = PTR_ERR(ctx->clock);
+ goto err_noclk;
+ }
+
+ ctx->reset = reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(ctx->reset)) {
+ dev_err(dev, "Failed to request reset control.\n");
+ rc = PTR_ERR(ctx->reset);
+ goto err_noreset;
+ }
+
+ ctx->reset_rc = reset_control_get_exclusive(dev, "root-complex");
+ if (IS_ERR(ctx->reset_rc)) {
+		dev_dbg(dev, "Failed to request root-complex reset control.\n");
+ ctx->reset_rc = NULL;
+ }
+
+ memory_region = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!memory_region) {
+ dev_err(dev, "Failed to find memory-region.\n");
+ rc = -ENOMEM;
+ goto err_nomem;
+ }
+
+ mem = of_reserved_mem_lookup(memory_region);
+ of_node_put(memory_region);
+ if (!mem) {
+ dev_err(dev, "Failed to find reserved memory.\n");
+ rc = -ENOMEM;
+ goto err_nomem;
+ }
+
+ ctx->mem_phys = mem->base;
+ ctx->mem_size = mem->size;
+
+ rc = of_reserved_mem_device_init(dev);
+ if (rc) {
+ dev_err(dev, "Failed to init reserved memory.\n");
+ goto err_nomem;
+ }
+
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (rc) {
+		dev_err(dev, "Failed to set DMA mask.\n");
+ goto err_nomem;
+ }
+
+	ctx->mem_virt = dma_alloc_coherent(dev, ctx->mem_size,
+					   &ctx->mem_coherent, GFP_KERNEL);
+ if (!ctx->mem_virt) {
+ dev_err(dev, "Failed to allocate reserved memory.\n");
+ rc = -ENOMEM;
+ goto err_nomem;
+ }
+
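+	/*
+	 * Back the whole reserved region with a single coherent allocation
+	 * and hand it to a genalloc pool; client mmap() buffers and the
+	 * command queue are then carved out of the pool in page-granular
+	 * chunks.
+	 */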
+ ctx->pool = gen_pool_create(ilog2(PAGE_SIZE), -1);
+ if (!ctx->pool) {
+		dev_err(dev, "Failed to set up genalloc pool.\n");
+ rc = -ENOMEM;
+ goto err_nopool;
+ }
+
+ rc = gen_pool_add_virt(ctx->pool, (unsigned long)ctx->mem_virt,
+ ctx->mem_phys, ctx->mem_size, -1);
+ if (rc) {
+ dev_err(ctx->dev, "Failed to add memory to genalloc pool.\n");
+ goto err_pool_scu_clk;
+ }
+
+ rc = aspeed_xdma_init_scu(ctx, dev);
+ if (rc)
+ goto err_pool_scu_clk;
+
+ rc = clk_prepare_enable(ctx->clock);
+ if (rc) {
+ dev_err(dev, "Failed to enable the clock.\n");
+ goto err_pool_scu_clk;
+ }
+
+ if (ctx->reset_rc) {
+ rc = reset_control_deassert(ctx->reset_rc);
+ if (rc) {
+ dev_err(dev, "Failed to clear the RC reset.\n");
+ goto err_reset_rc;
+ }
+ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
+ XDMA_ENGINE_SETUP_TIME_MAX_US);
+ }
+
+ rc = reset_control_deassert(ctx->reset);
+ if (rc) {
+ dev_err(dev, "Failed to clear the reset.\n");
+ goto err_reset;
+ }
+ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
+ XDMA_ENGINE_SETUP_TIME_MAX_US);
+
+ ctx->cmdq = gen_pool_dma_alloc(ctx->pool, XDMA_CMDQ_SIZE,
+ &ctx->cmdq_phys);
+ if (!ctx->cmdq) {
+		dev_err(ctx->dev,
+			"Failed to allocate cmdq from genalloc pool.\n");
+ rc = -ENOMEM;
+ goto err_pool;
+ }
+
+ aspeed_xdma_init_eng(ctx);
+
+ ctx->misc.minor = MISC_DYNAMIC_MINOR;
+ ctx->misc.fops = &aspeed_xdma_fops;
+ ctx->misc.name = "aspeed-xdma";
+ ctx->misc.parent = dev;
+ rc = misc_register(&ctx->misc);
+ if (rc) {
+ dev_err(dev, "Failed to register xdma miscdevice.\n");
+ goto err_misc;
+ }
+
+ /*
+ * This interrupt could fire immediately so only request it once the
+ * engine and driver are initialized.
+ */
+ ctx->pcie_irq = platform_get_irq(pdev, 1);
+ if (ctx->pcie_irq < 0) {
+		dev_warn(dev, "Failed to find PCIe IRQ.\n");
+ } else {
+ rc = request_irq(ctx->pcie_irq, aspeed_xdma_pcie_irq,
+ IRQF_SHARED, DEVICE_NAME, ctx);
+		if (rc < 0) {
+			dev_warn(dev, "Failed to request PCIe IRQ %d.\n",
+				 ctx->pcie_irq);
+			ctx->pcie_irq = -1;
+ }
+ }
+
+ kobject_init(&ctx->kobj, &aspeed_xdma_kobject_type);
+ return 0;
+
+err_misc:
+ gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE);
+err_pool:
+ reset_control_assert(ctx->reset);
+err_reset:
+ if (ctx->reset_rc)
+ reset_control_assert(ctx->reset_rc);
+err_reset_rc:
+ clk_disable_unprepare(ctx->clock);
+err_pool_scu_clk:
+ gen_pool_destroy(ctx->pool);
+err_nopool:
+ dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt,
+ ctx->mem_coherent);
+err_nomem:
+ if (ctx->reset_rc)
+ reset_control_put(ctx->reset_rc);
+ reset_control_put(ctx->reset);
+err_noreset:
+ clk_put(ctx->clock);
+err_noclk:
+ free_irq(ctx->irq, ctx);
+err_noirq:
+ iounmap(ctx->base);
+ release_mem_region(ctx->res_start, ctx->res_size);
+err_nomap:
+ kfree(ctx);
+ return rc;
+}
+
+static int aspeed_xdma_remove(struct platform_device *pdev)
+{
+ struct aspeed_xdma *ctx = platform_get_drvdata(pdev);
+
+ reset_control_assert(ctx->reset);
+ if (ctx->reset_rc)
+ reset_control_assert(ctx->reset_rc);
+ clk_disable_unprepare(ctx->clock);
+
+ aspeed_xdma_done(ctx, true);
+
+ misc_deregister(&ctx->misc);
+ kobject_put(&ctx->kobj);
+
+ return 0;
+}
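+
+/*
+ * Remove ordering: assert the resets and gate the clock first so the
+ * engine can raise no further interrupts, then wake any outstanding
+ * client via aspeed_xdma_done() (the 'true' argument presumably flagging
+ * an error to the waiter), and finally drop the probe-time kobject
+ * reference. The actual freeing happens in the release callback once the
+ * last open file descriptor goes away.
+ */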
+
+static const struct aspeed_xdma_chip aspeed_ast2500_xdma_chip = {
+ .control = XDMA_AST2500_CTRL_US_COMP | XDMA_AST2500_CTRL_DS_COMP |
+ XDMA_AST2500_CTRL_DS_DIRTY | XDMA_AST2500_CTRL_DS_SIZE_256 |
+ XDMA_AST2500_CTRL_DS_TIMEOUT | XDMA_AST2500_CTRL_DS_CHECK_ID,
+ .scu_bmc_class = SCU_AST2500_BMC_CLASS_REV,
+ .scu_misc_ctrl = 0,
+ .scu_pcie_conf = SCU_AST2500_PCIE_CONF,
+ .queue_entry_size = XDMA_AST2500_QUEUE_ENTRY_SIZE,
+ .regs = {
+ .bmc_cmdq_addr = XDMA_AST2500_BMC_CMDQ_ADDR,
+ .bmc_cmdq_endp = XDMA_AST2500_BMC_CMDQ_ENDP,
+ .bmc_cmdq_writep = XDMA_AST2500_BMC_CMDQ_WRITEP,
+ .bmc_cmdq_readp = XDMA_AST2500_BMC_CMDQ_READP,
+ .control = XDMA_AST2500_CTRL,
+ .status = XDMA_AST2500_STATUS,
+ },
+ .status_bits = {
+ .us_comp = XDMA_AST2500_STATUS_US_COMP,
+ .ds_comp = XDMA_AST2500_STATUS_DS_COMP,
+ .ds_dirty = XDMA_AST2500_STATUS_DS_DIRTY,
+ },
+ .set_cmd = aspeed_xdma_ast2500_set_cmd,
+};
+
+static const struct aspeed_xdma_chip aspeed_ast2600_xdma_chip = {
+ .control = XDMA_AST2600_CTRL_US_COMP | XDMA_AST2600_CTRL_DS_COMP |
+ XDMA_AST2600_CTRL_DS_DIRTY | XDMA_AST2600_CTRL_DS_SIZE_256,
+ .scu_bmc_class = SCU_AST2600_BMC_CLASS_REV,
+ .scu_misc_ctrl = SCU_AST2600_MISC_CTRL,
+ .scu_pcie_conf = SCU_AST2600_PCIE_CONF,
+ .queue_entry_size = XDMA_AST2600_QUEUE_ENTRY_SIZE,
+ .regs = {
+ .bmc_cmdq_addr = XDMA_AST2600_BMC_CMDQ_ADDR,
+ .bmc_cmdq_endp = XDMA_AST2600_BMC_CMDQ_ENDP,
+ .bmc_cmdq_writep = XDMA_AST2600_BMC_CMDQ_WRITEP,
+ .bmc_cmdq_readp = XDMA_AST2600_BMC_CMDQ_READP,
+ .control = XDMA_AST2600_CTRL,
+ .status = XDMA_AST2600_STATUS,
+ },
+ .status_bits = {
+ .us_comp = XDMA_AST2600_STATUS_US_COMP,
+ .ds_comp = XDMA_AST2600_STATUS_DS_COMP,
+ .ds_dirty = XDMA_AST2600_STATUS_DS_DIRTY,
+ },
+ .set_cmd = aspeed_xdma_ast2600_set_cmd,
+};
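+
+/*
+ * Relative to the AST2500, the AST2600 variant drops the downstream
+ * timeout and ID-check control bits and adds the misc-control register
+ * used to route the XDMA engine to the BMC PCIe device.
+ */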
+
+static const struct of_device_id aspeed_xdma_match[] = {
+ {
+ .compatible = "aspeed,ast2500-xdma",
+ .data = &aspeed_ast2500_xdma_chip,
+ },
+ {
+ .compatible = "aspeed,ast2600-xdma",
+ .data = &aspeed_ast2600_xdma_chip,
+ },
+ { },
+};
+
+static struct platform_driver aspeed_xdma_driver = {
+ .probe = aspeed_xdma_probe,
+ .remove = aspeed_xdma_remove,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_xdma_match,
+ },
+};
+
+module_platform_driver(aspeed_xdma_driver);
+
+MODULE_AUTHOR("Eddie James");
+MODULE_DESCRIPTION("Aspeed XDMA Engine Driver");
+MODULE_LICENSE("GPL v2");