-rw-r--r-- Documentation/ABI/testing/jtag-dev | 23
-rw-r--r-- Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml | 27
-rw-r--r-- Documentation/devicetree/bindings/i2c/i2c.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/i3c/i3c.yaml | 18
-rw-r--r-- Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/jtag/aspeed-jtag.yaml | 85
-rw-r--r-- Documentation/devicetree/bindings/mfd/intel,peci-client.yaml | 67
-rw-r--r-- Documentation/devicetree/bindings/peci/peci-aspeed.yaml | 124
-rw-r--r-- Documentation/devicetree/bindings/peci/peci-bus.yaml | 129
-rw-r--r-- Documentation/devicetree/bindings/peci/peci-client.yaml | 54
-rw-r--r-- Documentation/devicetree/bindings/peci/peci-npcm.yaml | 102
-rw-r--r-- Documentation/devicetree/bindings/soc/aspeed/aspeed-espi-slave.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/soc/aspeed/aspeed-lpc-sio.txt | 17
-rw-r--r-- Documentation/devicetree/bindings/soc/aspeed/aspeed-mctp.txt | 25
-rw-r--r-- Documentation/devicetree/bindings/soc/aspeed/aspeed-vga-sharedmem.txt | 20
-rw-r--r-- Documentation/hwmon/index.rst | 4
-rw-r--r-- Documentation/hwmon/peci-cpupower.rst | 65
-rw-r--r-- Documentation/hwmon/peci-cputemp.rst | 95
-rw-r--r-- Documentation/hwmon/peci-dimmpower.rst | 57
-rw-r--r-- Documentation/hwmon/peci-dimmtemp.rst | 60
-rw-r--r-- Documentation/i2c/slave-mqueue-backend.rst | 124
-rw-r--r-- Documentation/index.rst | 1
-rw-r--r-- Documentation/jtag/index.rst | 18
-rw-r--r-- Documentation/jtag/jtag-summary.rst | 47
-rw-r--r-- Documentation/jtag/jtagdev.rst | 207
-rw-r--r-- Documentation/misc-devices/aspeed-espi-slave.rst | 118
-rw-r--r-- Documentation/userspace-api/ioctl/ioctl-number.rst | 5
-rw-r--r-- MAINTAINERS | 11
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-intel-ast2500.dts | 517
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-intel-ast2600.dts | 975
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts | 362
-rw-r--r-- arch/arm/boot/dts/aspeed-g4.dtsi | 108
-rw-r--r-- arch/arm/boot/dts/aspeed-g5.dtsi | 132
-rw-r--r-- arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/aspeed-g6.dtsi | 250
-rw-r--r-- arch/arm/boot/dts/openbmc-flash-layout-intel-128MB.dtsi | 69
-rw-r--r-- arch/arm/boot/dts/openbmc-flash-layout-intel-64MB.dtsi | 38
-rw-r--r-- arch/arm/configs/intel_bmc_defconfig | 314
-rw-r--r-- drivers/Kconfig | 4
-rw-r--r-- drivers/Makefile | 2
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 21
-rw-r--r-- drivers/char/ipmi/ipmb_dev_int.c | 4
-rw-r--r-- drivers/char/ipmi/kcs_bmc_aspeed.c | 31
-rw-r--r-- drivers/clk/clk-aspeed.c | 44
-rw-r--r-- drivers/clk/clk-ast2600.c | 116
-rw-r--r-- drivers/hwmon/Kconfig | 67
-rw-r--r-- drivers/hwmon/Makefile | 5
-rw-r--r-- drivers/hwmon/aspeed-g6-pwm-tacho.c | 1163
-rw-r--r-- drivers/hwmon/aspeed-pwm-tacho.c | 2
-rw-r--r-- drivers/hwmon/peci-cpupower.c | 754
-rw-r--r-- drivers/hwmon/peci-cputemp.c | 550
-rw-r--r-- drivers/hwmon/peci-dimmpower.c | 673
-rw-r--r-- drivers/hwmon/peci-dimmtemp.c | 554
-rw-r--r-- drivers/hwmon/peci-hwmon.h | 659
-rw-r--r-- drivers/hwmon/pmbus/Kconfig | 9
-rw-r--r-- drivers/hwmon/pmbus/Makefile | 1
-rw-r--r-- drivers/hwmon/pmbus/pmbus.c | 8
-rw-r--r-- drivers/hwmon/pmbus/pmbus.h | 1
-rw-r--r-- drivers/hwmon/pmbus/pmbus_core.c | 88
-rw-r--r-- drivers/hwmon/pmbus/raa229126.c | 92
-rw-r--r-- drivers/i2c/Kconfig | 23
-rw-r--r-- drivers/i2c/Makefile | 1
-rw-r--r-- drivers/i2c/busses/i2c-aspeed.c | 764
-rw-r--r-- drivers/i2c/i2c-core-base.c | 87
-rw-r--r-- drivers/i2c/i2c-core-smbus.c | 22
-rw-r--r-- drivers/i2c/i2c-mux.c | 117
-rw-r--r-- drivers/i2c/i2c-slave-mqueue.c | 243
-rw-r--r-- drivers/i3c/Kconfig | 29
-rw-r--r-- drivers/i3c/Makefile | 4
-rw-r--r-- drivers/i3c/device.c | 47
-rw-r--r-- drivers/i3c/i3c-hub.c | 699
-rw-r--r-- drivers/i3c/i3cdev.c | 429
-rw-r--r-- drivers/i3c/internals.h | 5
-rw-r--r-- drivers/i3c/master.c | 403
-rw-r--r-- drivers/i3c/master/dw-i3c-master.c | 1099
-rw-r--r-- drivers/i3c/mctp/Kconfig | 6
-rw-r--r-- drivers/i3c/mctp/Makefile | 2
-rw-r--r-- drivers/i3c/mctp/i3c-mctp.c | 349
-rw-r--r-- drivers/jtag/Kconfig | 31
-rw-r--r-- drivers/jtag/Makefile | 2
-rw-r--r-- drivers/jtag/jtag-aspeed.c | 1574
-rw-r--r-- drivers/jtag/jtag.c | 337
-rw-r--r-- drivers/mfd/Kconfig | 17
-rw-r--r-- drivers/mfd/Makefile | 1
-rw-r--r-- drivers/mfd/intel-peci-client.c | 163
-rw-r--r-- drivers/mfd/syscon.c | 4
-rw-r--r-- drivers/mtd/spi-nor/controllers/aspeed-smc.c | 49
-rw-r--r-- drivers/mtd/spi-nor/micron-st.c | 7
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 22
-rw-r--r-- drivers/peci/Kconfig | 37
-rw-r--r-- drivers/peci/Makefile | 11
-rw-r--r-- drivers/peci/busses/Kconfig | 47
-rw-r--r-- drivers/peci/busses/Makefile | 8
-rw-r--r-- drivers/peci/busses/peci-aspeed.c | 522
-rw-r--r-- drivers/peci/busses/peci-mctp.c | 450
-rw-r--r-- drivers/peci/busses/peci-npcm.c | 406
-rw-r--r-- drivers/peci/peci-core.c | 2158
-rw-r--r-- drivers/peci/peci-dev.c | 359
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 27
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 29
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 53
-rw-r--r-- drivers/pinctrl/aspeed/pinctrl-aspeed.h | 3
-rw-r--r-- drivers/pwm/Kconfig | 9
-rw-r--r-- drivers/pwm/Makefile | 1
-rw-r--r-- drivers/pwm/pwm-fttmr010.c | 437
-rw-r--r-- drivers/rtc/Kconfig | 10
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-pchc620.c | 150
-rw-r--r-- drivers/soc/aspeed/Kconfig | 46
-rw-r--r-- drivers/soc/aspeed/Makefile | 7
-rw-r--r-- drivers/soc/aspeed/aspeed-bmc-misc.c | 289
-rw-r--r-- drivers/soc/aspeed/aspeed-espi-ctrl.h | 337
-rw-r--r-- drivers/soc/aspeed/aspeed-espi-oob.c | 488
-rw-r--r-- drivers/soc/aspeed/aspeed-espi-oob.h | 71
-rw-r--r-- drivers/soc/aspeed/aspeed-espi-slave.c | 466
-rw-r--r-- drivers/soc/aspeed/aspeed-espi-vw.c | 94
-rw-r--r-- drivers/soc/aspeed/aspeed-espi-vw.h | 20
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-ctrl.c | 1
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-mbox.c | 459
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-sio.c | 437
-rw-r--r-- drivers/soc/aspeed/aspeed-mctp.c | 1837
-rw-r--r-- drivers/soc/aspeed/aspeed-uart-routing.c | 68
-rw-r--r-- drivers/soc/aspeed/aspeed-vga-sharedmem.c | 163
-rw-r--r-- drivers/usb/gadget/configfs.c | 3
-rw-r--r-- drivers/usb/gadget/function/f_hid.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 12
-rw-r--r-- drivers/usb/gadget/function/storage_common.c | 9
-rw-r--r-- drivers/usb/gadget/function/storage_common.h | 29
-rw-r--r-- drivers/watchdog/aspeed_wdt.c | 21
-rw-r--r-- fs/jffs2/writev.c | 16
-rw-r--r-- include/dt-bindings/clock/aspeed-clock.h | 2
-rw-r--r-- include/dt-bindings/clock/ast2600-clock.h | 13
-rw-r--r-- include/linux/aspeed-mctp.h | 149
-rw-r--r-- include/linux/i2c-mux.h | 5
-rw-r--r-- include/linux/i2c.h | 27
-rw-r--r-- include/linux/i3c/ccc.h | 12
-rw-r--r-- include/linux/i3c/device.h | 14
-rw-r--r-- include/linux/i3c/master.h | 24
-rw-r--r-- include/linux/i3c/target.h | 23
-rw-r--r-- include/linux/jtag.h | 47
-rw-r--r-- include/linux/mfd/intel-peci-client.h | 163
-rw-r--r-- include/linux/peci.h | 151
-rw-r--r-- include/uapi/linux/aspeed-espi-ioc.h | 198
-rw-r--r-- include/uapi/linux/aspeed-lpc-mbox.h | 11
-rw-r--r-- include/uapi/linux/aspeed-lpc-sio.h | 46
-rw-r--r-- include/uapi/linux/aspeed-mctp.h | 128
-rw-r--r-- include/uapi/linux/i2c.h | 1
-rw-r--r-- include/uapi/linux/i3c/i3cdev.h | 38
-rw-r--r-- include/uapi/linux/jtag.h | 369
-rw-r--r-- include/uapi/linux/jtag_drv.h | 73
-rw-r--r-- include/uapi/linux/peci-ioctl.h | 703
-rw-r--r-- net/ncsi/ncsi-manage.c | 19
153 files changed, 27199 insertions, 318 deletions
diff --git a/Documentation/ABI/testing/jtag-dev b/Documentation/ABI/testing/jtag-dev
new file mode 100644
index 000000000000..423baab18761
--- /dev/null
+++ b/Documentation/ABI/testing/jtag-dev
@@ -0,0 +1,23 @@
+What: /dev/jtag[0-9]+
+Date: July 2018
+KernelVersion: 4.20
+Contact: oleksandrs@mellanox.com
+Description:
+ The misc device files /dev/jtag* are the interface
+ between a JTAG master interface and userspace.
+
+ The ioctl(2)-based ABI is defined and documented in
+ [include/uapi]<linux/jtag.h>.
+
+ The following file operations are supported:
+
+ open(2)
+ Opens the device and allocates a file descriptor.
+
+ ioctl(2)
+ Initiates various actions.
+ See the inline documentation in [include/uapi]<linux/jtag.h>
+ for descriptions of all ioctls.
+
+Users:
+ Userspace tools that want to access the JTAG bus.
diff --git a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
index ea643e6c3ef5..e334ad7f80c0 100644
--- a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
@@ -49,6 +49,33 @@ properties:
description:
states that there is another master active on this bus
+ bus-timeout-ms:
+ default: 1000
+ description:
+ Bus timeout in milliseconds; defaults to 1 second when not specified.
+
+ #retries:
+ description:
+ Number of retries for master transfer
+
+ aspeed,dma-buf-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Size of the DMA buffer (from 2 to 4095 in the case of AST2500).
+ Only AST2500/2600 support DMA mode.
+ Limitation on AST2500 - I2C shares the DMA H/W with the UHCI host
+ controller and the MCTP controller. Since those controllers operate in DMA
+ mode only, I2C has to use buffer mode or byte mode instead if one of
+ those controllers is enabled. Likewise, if SD/eMMC or Port80
+ snoop uses DMA mode instead of PIO or FIFO respectively, I2C can't use
+ DMA mode. If both DMA and buffer modes are enabled, DMA mode will be
+ selected.
+
+ general-call:
+ type: boolean
+ description:
+ Enables reception of I2C General Call messages.
+
required:
- reg
- compatible
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index fc3dd7ec0445..1f9c8d0e3d62 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -137,6 +137,12 @@ wants to support one of the below features, it should adapt these bindings.
- wakeup-source
device can be used as a wakeup source.
+- bus-timeout-ms
+ Bus timeout in milliseconds.
+
+- #retries
+ Number of retries for master transfer.
+
Binding may contain optional "interrupts" property, describing interrupts
used by the device. I2C core will assign "irq" interrupt (or the very first
interrupt if not using interrupt names) as primary interrupt for the slave.
diff --git a/Documentation/devicetree/bindings/i3c/i3c.yaml b/Documentation/devicetree/bindings/i3c/i3c.yaml
index 1f82fc923799..b220a3a90bc5 100644
--- a/Documentation/devicetree/bindings/i3c/i3c.yaml
+++ b/Documentation/devicetree/bindings/i3c/i3c.yaml
@@ -55,6 +55,24 @@ properties:
May not be supported by all controllers.
+ initial-role:
+ enum:
+ - primary
+ - secondary
+ - target
+ description: |
+ Initial role of the I3C device. If not specified, the role defaults to "primary".
+
+ pid:
+ $ref: '/schemas/types.yaml#/definitions/uint64'
+ description: |
+ Target Device Provisioned ID
+
+ dcr:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: |
+ Target Device Characteristic Register value
+
required:
- "#address-cells"
- "#size-cells"
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
index 07f35f36085d..1409ea495e79 100644
--- a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
+++ b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
@@ -20,6 +20,21 @@ Documentation/devicetree/bindings/i3c/i3c.yaml for more details):
- i2c-scl-hz
- i3c-scl-hz
+Optional properties specific for this driver:
+
+- i3c-od-scl-low-ns: SCL low period for Open-Drain phase. Expressed in
+ nanoseconds. It is validated by the driver. Minimum value is 200.
+- i3c-od-scl-high-ns: SCL high period for Open-Drain phase. Expressed in
+ nanoseconds. It is validated by the driver. Minimum value is 25.
+- i3c-pp-scl-low-ns: SCL low period for Push-Pull phase. Expressed in
+ nanoseconds. It is validated by the driver. Minimum value is 25.
+- i3c-pp-scl-high-ns: SCL high period for Push-Pull phase. It is expressed in
+ nanoseconds. It is validated by the driver. Minimum value is 25.
+- sda-tx-hold-ns: SDA Hold Time (Thd_dat). Expressed in nanoseconds. It is not
+ validated by the driver. Minimum value is 5. Maximum value is 35. Other
+ values will be trimmed to this range. This parameter is not supported by
+ old IP versions.
+
I3C device connected on the bus follow the generic description (see
Documentation/devicetree/bindings/i3c/i3c.yaml for more details).
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml
index 4ff6fabfcb30..6a6e4f6a772c 100644
--- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-kcs-bmc.yaml
@@ -40,6 +40,9 @@ properties:
- description: ODR register
- description: STR register
+ clocks:
+ minItems: 1
+
aspeed,lpc-io-reg:
$ref: '/schemas/types.yaml#/definitions/uint32-array'
minItems: 1
@@ -74,6 +77,7 @@ properties:
required:
- compatible
- interrupts
+ - clocks
additionalProperties: false
@@ -103,4 +107,5 @@ examples:
aspeed,lpc-io-reg = <0xca2>;
aspeed,lpc-interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
};
diff --git a/Documentation/devicetree/bindings/jtag/aspeed-jtag.yaml b/Documentation/devicetree/bindings/jtag/aspeed-jtag.yaml
new file mode 100644
index 000000000000..26faec29a5fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/jtag/aspeed-jtag.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/jtag/aspeed-jtag.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed JTAG driver for ast2400, ast2500 and ast2600 SoCs
+
+description:
+ The driver adds support for the Aspeed AST24xx/25xx/26xx series SoC JTAG
+ master controller. The driver implements the following JTAG ops
+ freq_get
+ freq_set
+ status_get
+ status_set
+ xfer
+ mode_set
+ bitbang
+ enable
+ disable
+
+ It has been tested on a Mellanox system with a BMC equipped with an
+ Aspeed 2520 SoC for programming CPLD devices.
+
+ It has also been tested on an Intel system using an Aspeed 25xx SoC
+ for JTAG communication.
+
+ It has also been tested on an Intel system using an Aspeed 26xx SoC for JTAG communication.
+
+maintainers:
+ - Oleksandr Shamray <oleksandrs@mellanox.com>
+ - Jiri Pirko <jiri@mellanox.com>
+ - Ernesto Corona <ernesto.corona@intel.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - aspeed,ast2400-jtag
+ - aspeed,ast2500-jtag
+ - aspeed,ast2600-jtag
+
+
+ reg:
+ items:
+ - description: JTAG Master controller register range
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/clock/aspeed-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ jtag: jtag@1e6e4000 {
+ compatible = "aspeed,ast2500-jtag";
+ reg = <0x1e6e4000 0x1c>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ resets = <&syscon ASPEED_RESET_JTAG_MASTER>;
+ interrupts = <43>;
+ };
+ - |
+ #include <dt-bindings/clock/aspeed-clock.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ jtag1: jtag@1e6e4100 {
+ compatible = "aspeed,ast2600-jtag";
+ reg = <0x1e6e4100 0x40>;
+ clocks = <&syscon ASPEED_CLK_APB1>;
+ resets = <&syscon ASPEED_RESET_JTAG_MASTER2>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/intel,peci-client.yaml b/Documentation/devicetree/bindings/mfd/intel,peci-client.yaml
new file mode 100644
index 000000000000..7baddce0a92c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/intel,peci-client.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/intel,peci-client.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel PECI Client Device Tree Bindings
+
+maintainers:
+ - Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+description: |
+ PECI (Platform Environment Control Interface) is a one-wire bus interface
+ that provides a communication channel from PECI clients in Intel processors
+ and chipset components to external monitoring or control devices. PECI is
+ designed to support the following sideband functions:
+ - Processor and DRAM thermal management
+ - Platform Manageability
+ - Processor Interface Tuning and Diagnostics
+ - Failure Analysis
+
+properties:
+ compatible:
+ const: intel,peci-client
+
+ reg:
+ description: |
+ Address of a client CPU. According to the PECI specification, client
+ addresses start from 0x30.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/ast2600-clock.h>
+ peci: bus@1e78b000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e78b000 0x60>;
+
+ peci0: peci-bus@0 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+
+ peci-client@30 {
+ compatible = "intel,peci-client";
+ reg = <0x30>;
+ };
+
+ peci-client@31 {
+ compatible = "intel,peci-client";
+ reg = <0x31>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/peci/peci-aspeed.yaml b/Documentation/devicetree/bindings/peci/peci-aspeed.yaml
new file mode 100644
index 000000000000..0f5c2993fe9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/peci/peci-aspeed.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/peci/peci-aspeed.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Aspeed PECI Bus Device Tree Bindings
+
+maintainers:
+ - Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-peci
+ - aspeed,ast2500-peci
+ - aspeed,ast2600-peci
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ # Required to define a client address.
+ const: 1
+
+ "#size-cells":
+ # Required to define a client address.
+ const: 0
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description: |
+ Clock source for PECI controller. Should reference the external
+ oscillator clock.
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ clock-frequency:
+ # Operation frequency of PECI controller in units of Hz.
+ minimum: 187500
+ maximum: 24000000
+
+ msg-timing:
+ description: |
+ Message timing negotiation period. This value will determine the period
+ of message timing negotiation to be issued by PECI controller. The unit
+ of the programmed value is four times of PECI clock period.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 255
+ default: 1
+
+ addr-timing:
+ description: |
+ Address timing negotiation period. This value will determine the period
+ of address timing negotiation to be issued by PECI controller. The unit
+ of the programmed value is four times of PECI clock period.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 255
+ default: 1
+
+ rd-sampling-point:
+ description: |
+ Read sampling point selection. The whole period of a bit time will be
+ divided into 16 time frames. This value will determine the time frame
+ in which the controller will sample PECI signal for data read back.
+ Usually in the middle of a bit time is the best.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 15
+ default: 8
+
+ cmd-timeout-ms:
+ # Command timeout in units of ms.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+ maximum: 60000
+ default: 1000
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - interrupts
+ - clocks
+ - resets
+ - clock-frequency
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/ast2600-clock.h>
+ peci: bus@1e78b000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e78b000 0x60>;
+
+ peci0: peci-bus@0 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+ msg-timing = <1>;
+ addr-timing = <1>;
+ rd-sampling-point = <8>;
+ cmd-timeout-ms = <1000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/peci/peci-bus.yaml b/Documentation/devicetree/bindings/peci/peci-bus.yaml
new file mode 100644
index 000000000000..b085e67089cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/peci/peci-bus.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/peci/peci-bus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic Device Tree Bindings for PECI bus
+
+maintainers:
+ - Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+description: |
+ PECI (Platform Environment Control Interface) is a one-wire bus interface that
+ provides a communication channel from Intel processors and chipset components
+ to external monitoring or control devices. PECI is designed to support the
+ following sideband functions:
+
+ * Processor and DRAM thermal management
+ - Processor fan speed control is managed by comparing Digital Thermal
+ Sensor (DTS) thermal readings acquired via PECI against the
+ processor-specific fan speed control reference point, or TCONTROL. Both
+ TCONTROL and DTS thermal readings are accessible via the processor PECI
+ client. These variables are referenced to a common temperature, the TCC
+ activation point, and are both defined as negative offsets from that
+ reference.
+ - PECI based access to the processor package configuration space provides
+ a means for Baseboard Management Controllers (BMC) or other platform
+ management devices to actively manage the processor and memory power
+ and thermal features.
+
+ * Platform Manageability
+ - Platform manageability functions including thermal, power, and error
+ monitoring. Note that platform 'power' management includes monitoring
+ and control for both the processor and DRAM subsystem to assist with
+ data center power limiting.
+ - PECI allows read access to certain error registers in the processor MSR
+ space and status monitoring registers in the PCI configuration space
+ within the processor and downstream devices.
+ - PECI permits writes to certain registers in the processor PCI
+ configuration space.
+
+ * Processor Interface Tuning and Diagnostics
+ - Processor interface tuning and diagnostics capabilities
+ (Intel Interconnect BIST). The processor's Intel Interconnect Built-In
+ Self Test (Intel IBIST) allows for in-field diagnostic capabilities in
+ the Intel UPI and memory controller interfaces. PECI provides a port to
+ execute these diagnostics via its PCI Configuration read and write
+ capabilities.
+
+ * Failure Analysis
+ - Output the state of the processor after a failure for analysis via
+ Crashdump.
+
+ PECI uses a single wire for self-clocking and data transfer. The bus
+ requires no additional control lines. The physical layer is a self-clocked
+ one-wire bus that begins each bit with a driven, rising edge from an idle
+ level near zero volts. The duration of the signal driven high depends on
+ whether the bit value is a logic '0' or logic '1'. PECI also includes
+ variable data transfer rate established with every message. In this way, it
+ is highly flexible even though underlying logic is simple.
+
+ The interface design was optimized for interfacing between an Intel
+ processor and chipset components in both single processor and multiple
+ processor environments. The single wire interface provides low board
+ routing overhead for the multiple load connections in the congested routing
+ area near the processor and chipset components. Bus speed, error checking,
+ and low protocol overhead provides adequate link bandwidth and reliability
+ to transfer critical device operating conditions and configuration
+ information.
+
+ The PECI subsystem supports single or multiple bus nodes. Each bus has one
+ adapter node and may have multiple device-specific client nodes attached to
+ it, so that each processor client's features can be supported by its client
+ node through the adapter connection on that bus.
+
+properties:
+ compatible:
+ const: simple-bus
+
+ "#address-cells":
+ # Required to define bus device control resource address.
+ const: 1
+
+ "#size-cells":
+ # Required to define bus device control resource address.
+ const: 1
+
+ ranges: true
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/ast2600-clock.h>
+ peci: bus@1e78b000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e78b000 0x200>;
+
+ peci0: peci-bus@0 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+ };
+
+ // Just an example. ast2600 doesn't have a second PECI module actually.
+ peci1: peci-bus@100 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/peci/peci-client.yaml b/Documentation/devicetree/bindings/peci/peci-client.yaml
new file mode 100644
index 000000000000..fc7c4110e929
--- /dev/null
+++ b/Documentation/devicetree/bindings/peci/peci-client.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/peci/peci-client.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic Device Tree Bindings for PECI clients
+
+maintainers:
+ - Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+properties:
+ compatible:
+ enum:
+ - intel,peci-client
+
+ reg:
+ description: |
+ Address of a client CPU. According to the PECI specification, client
+ addresses start from 0x30.
+ maxItems: 1
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/ast2600-clock.h>
+ peci: bus@1e78b000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e78b000 0x60>;
+
+ peci0: peci-bus@0 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+
+ peci-client@30 {
+ compatible = "intel,peci-client";
+ reg = <0x30>;
+ };
+
+ peci-client@31 {
+ compatible = "intel,peci-client";
+ reg = <0x31>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/peci/peci-npcm.yaml b/Documentation/devicetree/bindings/peci/peci-npcm.yaml
new file mode 100644
index 000000000000..bcd5626e68e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/peci/peci-npcm.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/peci/peci-npcm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM PECI Bus Device Tree Bindings
+
+maintainers:
+ - Tomer Maimon <tmaimon77@gmail.com>
+
+properties:
+ compatible:
+ const: nuvoton,npcm750-peci # for the NPCM7XX BMC.
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ # Required to define a client address.
+ const: 1
+
+ "#size-cells":
+ # Required to define a client address.
+ const: 0
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ # PECI reference clock.
+ maxItems: 1
+
+ cmd-timeout-ms:
+ # Command timeout in units of ms.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+ maximum: 60000
+ default: 1000
+
+ pull-down:
+ description: |
+ Defines the PECI I/O internal pull-down operation.
+ 0: pull-down always enabled.
+ 1: pull-down only during transactions.
+ 2: pull-down always disabled.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 2
+ default: 0
+
+ host-neg-bit-rate:
+ description: |
+ Defines the host negotiation bit rate divider. The host negotiation
+ bit rate is calculated with the formula:
+ clock frequency [Hz] / (4 x (host-neg-bit-rate + 1))
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 7
+ maximum: 31
+ default: 15
+
+ high-volt-range:
+ description: |
+ Adapts the PECI I/O interface to the voltage range.
+ 0: PECI I/O interface voltage range of 0.8-1.06V (default)
+ 1: PECI I/O interface voltage range of 0.95-1.26V
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/nuvoton,npcm7xx-clock.h>
+ peci: bus@100000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x100000 0x200>;
+
+ peci0: peci-bus@0 {
+ compatible = "nuvoton,npcm750-peci";
+ reg = <0x0 0x200>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_APB3>;
+ cmd-timeout-ms = <1000>;
+ pull-down = <0>;
+ host-neg-bit-rate = <15>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/soc/aspeed/aspeed-espi-slave.txt b/Documentation/devicetree/bindings/soc/aspeed/aspeed-espi-slave.txt
new file mode 100644
index 000000000000..f72d9ae32f3e
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/aspeed/aspeed-espi-slave.txt
@@ -0,0 +1,20 @@
+ASPEED eSPI Slave Controller
+
+Required properties:
+ - compatible: must be one of:
+ - "aspeed,ast2500-espi-slave"
+ - "aspeed,ast2600-espi-slave"
+
+ - reg: physical base address of the controller and length of memory mapped
+ region
+
+ - interrupts: interrupt generated by the controller
+
+Example:
+
+ espi: espi@1e6ee000 {
+ compatible = "aspeed,ast2500-espi-slave";
+ reg = <0x1e6ee000 0x100>;
+ interrupts = <23>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/soc/aspeed/aspeed-lpc-sio.txt b/Documentation/devicetree/bindings/soc/aspeed/aspeed-lpc-sio.txt
new file mode 100644
index 000000000000..c74ea3a4e5ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/aspeed/aspeed-lpc-sio.txt
@@ -0,0 +1,17 @@
+* Aspeed LPC SIO driver.
+
+Required properties:
+- compatible : Should be one of:
+ "aspeed,ast2400-lpc-sio"
+ "aspeed,ast2500-lpc-sio"
+- reg : Should contain lpc-sio registers location and length
+- clocks: contains a phandle to the syscon node describing the clocks.
+ There should then be one cell representing the clock to use.
+
+Example:
+lpc_sio: lpc-sio@100 {
+ compatible = "aspeed,ast2500-lpc-sio";
+ reg = <0x100 0x20>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/soc/aspeed/aspeed-mctp.txt b/Documentation/devicetree/bindings/soc/aspeed/aspeed-mctp.txt
new file mode 100644
index 000000000000..5dc30fdfc53a
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/aspeed/aspeed-mctp.txt
@@ -0,0 +1,25 @@
+* Aspeed AST2600 MCTP PCIe VDM Controller
+
+Required properties:
+- compatible : must be "aspeed,ast2600-mctp"
+- reg : contains the address and size of the memory region
+ associated with the MCTP controller
+- resets : reset specifier for the syscon reset associated with
+ the MCTP controller
+- interrupts-extended : two interrupt cells; the first specifies the global
+ interrupt for MCTP controller and the second
+ specifies the PCIe reset or PERST interrupt
+- aspeed,pcieh : a phandle to the PCIe Host Controller node to be
+ used in conjunction with the PCIe reset or PERST
+ interrupt
+Example:
+
+mctp: mctp@1e6e8000 {
+ compatible = "aspeed,ast2600-mctp";
+ reg = <0x1e6e8000 0x1000>;
+ interrupts-extended = <&gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <&scu_ic0 ASPEED_AST2600_SCU_IC0_PCIE_PERST_LO_TO_HI>;
+ resets = <&syscon ASPEED_RESET_DEV_MCTP>;
+ aspeed,pcieh = <&pcieh>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/soc/aspeed/aspeed-vga-sharedmem.txt b/Documentation/devicetree/bindings/soc/aspeed/aspeed-vga-sharedmem.txt
new file mode 100644
index 000000000000..03f57c53e844
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/aspeed/aspeed-vga-sharedmem.txt
@@ -0,0 +1,20 @@
+* Aspeed VGA shared memory driver
+
+The Aspeed VGA shared memory driver allows users to read data from the
+AST2500 VGA memory. This driver is required by the ManagedDataRegionlV2
+specification. In the spec, the BIOS transfers the whole SMBIOS table to
+VGA memory and the BMC gets the table from VGA memory. The OpenBMC project
+does not allow the use of /dev/mem due to security concerns. To get the
+data in VGA shared memory from user space, this driver is implemented and
+only allows users to mmap a limited memory area.
+
+Required properties:
+- compatible: "aspeed,ast2500-vga-sharedmem"
+ - aspeed,ast2500-vga-sharedmem: Aspeed AST2500 family
+- reg: Should contain VGA shared memory start address and length
+
+Example:
+vga-shared-memory {
+ compatible = "aspeed,ast2500-vga-sharedmem";
+ reg = <0x9ff00000 0x100000>;
+};
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index f790f1260c33..7257214e5408 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -153,6 +153,10 @@ Hardware Monitoring Kernel Drivers
pc87360
pc87427
pcf8591
+ peci-cputemp
+ peci-dimmtemp
+ peci-cpupower
+ peci-dimmpower
pim4328
pm6764tr
pmbus
diff --git a/Documentation/hwmon/peci-cpupower.rst b/Documentation/hwmon/peci-cpupower.rst
new file mode 100644
index 000000000000..d3b6a666f076
--- /dev/null
+++ b/Documentation/hwmon/peci-cpupower.rst
@@ -0,0 +1,65 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver peci-cpupower
+===========================
+
+:Copyright: |copy| 2018-2020 Intel Corporation
+
+Supported chips:
+ One of Intel server CPUs listed below which is connected to a PECI bus.
+ * Intel Xeon E5/E7 v3 server processors
+ Intel Xeon E5-14xx v3 family
+ Intel Xeon E5-24xx v3 family
+ Intel Xeon E5-16xx v3 family
+ Intel Xeon E5-26xx v3 family
+ Intel Xeon E5-46xx v3 family
+ Intel Xeon E7-48xx v3 family
+ Intel Xeon E7-88xx v3 family
+ * Intel Xeon E5/E7 v4 server processors
+ Intel Xeon E5-16xx v4 family
+ Intel Xeon E5-26xx v4 family
+ Intel Xeon E5-46xx v4 family
+ Intel Xeon E7-48xx v4 family
+ Intel Xeon E7-88xx v4 family
+ * Intel Xeon Scalable server processors
+ Intel Xeon D family
+ Intel Xeon Bronze family
+ Intel Xeon Silver family
+ Intel Xeon Gold family
+ Intel Xeon Platinum family
+
+ Addresses scanned: PECI client address 0x30 - 0x37
+ Datasheet: Available from http://www.intel.com/design/literature.htm
+
+Author:
+ Zhikui Ren <zhikui.ren@intel.com>
+
+Description
+-----------
+
+This driver implements a generic PECI hwmon feature which provides
+average power and energy consumption readings of the CPU package based on
+the energy counter.
+
+Power values are the average power since the last measurement, given in
+milliwatts, and are measurable only when the target CPU is powered on.
+
+Energy values are the energy consumption in microjoules.
+
+The driver also provides the current package power limit and the maximal
+(TDP) and minimal power settings.
+
+All needed processor registers are accessible using the PECI Client Command
+Suite via the processor PECI client.
+
+``sysfs`` interface
+-------------------
+======================= =======================================================
+power1_label Provides string "cpu power".
+power1_average Provides average power since last read in milliwatts.
+power1_cap Provides current package power limit 1 (PPL1).
+power1_cap_max Provides maximal (TDP) package power setting.
+power1_cap_min Provides minimal package power setting.
+energy1_label Provides string "cpu energy".
+energy1_input Provides energy consumption in microjoules.
+======================= =======================================================
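+
+Usage example
+-------------
+The snippet below is a minimal sketch of reading these attributes from user
+space; the hwmon index used in the path (``hwmon2``) is only an illustration
+and has to be resolved at runtime by matching the hwmon device's name
+attribute::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        /* hwmon2 is a placeholder; look up the real index under
+         * /sys/class/hwmon/ before using it. */
+        int fd = open("/sys/class/hwmon/hwmon2/power1_average", O_RDONLY);
+        char buf[32] = "";
+
+        if (fd < 0 || read(fd, buf, sizeof(buf) - 1) <= 0)
+            return 1;
+        printf("power1_average = %s", buf);
+        close(fd);
+        return 0;
+    }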
diff --git a/Documentation/hwmon/peci-cputemp.rst b/Documentation/hwmon/peci-cputemp.rst
new file mode 100644
index 000000000000..bf08e16dd989
--- /dev/null
+++ b/Documentation/hwmon/peci-cputemp.rst
@@ -0,0 +1,95 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver peci-cputemp
+==========================
+
+:Copyright: |copy| 2018-2019 Intel Corporation
+
+Supported chips:
+ One of Intel server CPUs listed below which is connected to a PECI bus.
+ * Intel Xeon E5/E7 v3 server processors
+ Intel Xeon E5-14xx v3 family
+ Intel Xeon E5-24xx v3 family
+ Intel Xeon E5-16xx v3 family
+ Intel Xeon E5-26xx v3 family
+ Intel Xeon E5-46xx v3 family
+ Intel Xeon E7-48xx v3 family
+ Intel Xeon E7-88xx v3 family
+ * Intel Xeon E5/E7 v4 server processors
+ Intel Xeon E5-16xx v4 family
+ Intel Xeon E5-26xx v4 family
+ Intel Xeon E5-46xx v4 family
+ Intel Xeon E7-48xx v4 family
+ Intel Xeon E7-88xx v4 family
+ * Intel Xeon Scalable server processors
+ Intel Xeon D family
+ Intel Xeon Bronze family
+ Intel Xeon Silver family
+ Intel Xeon Gold family
+ Intel Xeon Platinum family
+
+ Addresses scanned: PECI client address 0x30 - 0x37
+ Datasheet: Available from http://www.intel.com/design/literature.htm
+
+Author:
+ Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+Description
+-----------
+
+This driver implements a generic PECI hwmon feature which provides Digital
+Thermal Sensor (DTS) thermal readings of the CPU package and CPU cores that are
+accessible using the PECI Client Command Suite via the processor PECI client.
+
+All temperature values are given in millidegree Celsius and will be measurable
+only when the target CPU is powered on.
+
+``sysfs`` interface
+-------------------
+======================= =======================================================
+temp1_label "Die"
+temp1_input Provides current die temperature of the CPU package.
+temp1_max Provides thermal control temperature of the CPU package
+ which is also known as Tcontrol.
+temp1_crit Provides shutdown temperature of the CPU package which
+ is also known as the maximum processor junction
+ temperature, Tjmax or Tprochot.
+temp1_crit_hyst Provides the hysteresis value from Tcontrol to Tjmax of
+ the CPU package.
+
+temp2_label "DTS"
+temp2_input Provides current DTS temperature of the CPU package.
+temp2_max Provides thermal control temperature of the CPU package
+ which is also known as Tcontrol.
+temp2_crit Provides shutdown temperature of the CPU package which
+ is also known as the maximum processor junction
+ temperature, Tjmax or Tprochot.
+temp2_crit_hyst Provides the hysteresis value from Tcontrol to Tjmax of
+ the CPU package.
+
+temp3_label "Tcontrol"
+temp3_input Provides current Tcontrol temperature of the CPU
+ package which is also known as Fan Temperature target.
+ Indicates the relative value from thermal monitor trip
+ temperature at which fans should be engaged.
+temp3_crit Provides Tcontrol critical value of the CPU package
+ which is the same as Tjmax.
+
+temp4_label "Tthrottle"
+temp4_input Provides current Tthrottle temperature of the CPU
+ package, used as the throttling temperature. If this
+ value is enabled and lower than Tjmax, throttling will
+ occur and be reported at a value lower than Tjmax.
+
+temp5_label "Tjmax"
+temp5_input Provides the maximum junction temperature, Tjmax of the
+ CPU package.
+
+temp[6-N]_label Provides string "Core X", where X is resolved core
+ number.
+temp[6-N]_input Provides current temperature of each core.
+temp[6-N]_max Provides thermal control temperature of the core.
+temp[6-N]_crit Provides shutdown temperature of the core.
+temp[6-N]_crit_hyst Provides the hysteresis value from Tcontrol to Tjmax of
+ the core.
+======================= =======================================================
\ No newline at end of file
diff --git a/Documentation/hwmon/peci-dimmpower.rst b/Documentation/hwmon/peci-dimmpower.rst
new file mode 100644
index 000000000000..0d9c58fdaf9c
--- /dev/null
+++ b/Documentation/hwmon/peci-dimmpower.rst
@@ -0,0 +1,57 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver peci-dimmpower
+============================
+
+:Copyright: |copy| 2020 Intel Corporation
+
+Supported chips:
+ One of Intel server CPUs listed below which is connected to a PECI bus.
+ * Intel Xeon E5/E7 v3 server processors
+ Intel Xeon E5-14xx v3 family
+ Intel Xeon E5-24xx v3 family
+ Intel Xeon E5-16xx v3 family
+ Intel Xeon E5-26xx v3 family
+ Intel Xeon E5-46xx v3 family
+ Intel Xeon E7-48xx v3 family
+ Intel Xeon E7-88xx v3 family
+ * Intel Xeon E5/E7 v4 server processors
+ Intel Xeon E5-16xx v4 family
+ Intel Xeon E5-26xx v4 family
+ Intel Xeon E5-46xx v4 family
+ Intel Xeon E7-48xx v4 family
+ Intel Xeon E7-88xx v4 family
+ * Intel Xeon Scalable server processors
+ Intel Xeon D family
+ Intel Xeon Bronze family
+ Intel Xeon Silver family
+ Intel Xeon Gold family
+ Intel Xeon Platinum family
+
+ Addresses scanned: PECI client address 0x30 - 0x37
+ Datasheet: Available from http://www.intel.com/design/literature.htm
+
+Author:
+ Zbigniew Lukwinski <zbigniew.lukwinski@linux.intel.com>
+
+Description
+-----------
+
+This driver implements a generic PECI hwmon feature which provides
+average power consumption readings of the memory based on the energy counter.
+The power value is the average power since the last measurement, given in
+milliwatts, and is measurable only when the target CPU is powered on.
+The driver also provides the current DRAM plane power limit and the maximal
+and minimal power settings.
+All needed processor registers are accessible using the PECI Client Command
+Suite via the processor PECI client.
+
+``sysfs`` interface
+-------------------
+======================= =======================================================
+power1_label Provides string "dimm power".
+power1_average Provides average DRAM power since last read in milliwatts.
+power1_cap Provides current DRAM plane power limit.
+power1_cap_max Provides maximal DRAM power setting.
+power1_cap_min Provides minimal DRAM power setting.
+======================= =======================================================
diff --git a/Documentation/hwmon/peci-dimmtemp.rst b/Documentation/hwmon/peci-dimmtemp.rst
new file mode 100644
index 000000000000..e3581811de2d
--- /dev/null
+++ b/Documentation/hwmon/peci-dimmtemp.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver peci-dimmtemp
+===========================
+
+:Copyright: |copy| 2018-2019 Intel Corporation
+
+Supported chips:
+ One of Intel server CPUs listed below which is connected to a PECI bus.
+ * Intel Xeon E5/E7 v3 server processors
+ Intel Xeon E5-14xx v3 family
+ Intel Xeon E5-24xx v3 family
+ Intel Xeon E5-16xx v3 family
+ Intel Xeon E5-26xx v3 family
+ Intel Xeon E5-46xx v3 family
+ Intel Xeon E7-48xx v3 family
+ Intel Xeon E7-88xx v3 family
+ * Intel Xeon E5/E7 v4 server processors
+ Intel Xeon E5-16xx v4 family
+ Intel Xeon E5-26xx v4 family
+ Intel Xeon E5-46xx v4 family
+ Intel Xeon E7-48xx v4 family
+ Intel Xeon E7-88xx v4 family
+ * Intel Xeon Scalable server processors
+ Intel Xeon D family
+ Intel Xeon Bronze family
+ Intel Xeon Silver family
+ Intel Xeon Gold family
+ Intel Xeon Platinum family
+
+ Addresses scanned: PECI client address 0x30 - 0x37
+ Datasheet: Available from http://www.intel.com/design/literature.htm
+
+Author:
+ Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>
+
+Description
+-----------
+
+This driver implements a generic PECI hwmon feature which provides Digital
+Thermal Sensor (DTS) thermal readings of DIMM components that are accessible
+using the PECI Client Command Suite via the processor PECI client.
+
+All temperature values are given in millidegree Celsius and will be measurable
+only when the target CPU is powered on.
+
+``sysfs`` interface
+-------------------
+======================= =======================================================
+
+temp[N]_label Provides string "DIMM CI", where C is DIMM channel and
+ I is DIMM index of the populated DIMM.
+temp[N]_input Provides current temperature of the populated DIMM.
+temp[N]_max Provides thermal control temperature of the DIMM.
+temp[N]_crit Provides shutdown temperature of the DIMM.
+======================= =======================================================
+
+Note:
+ DIMM temperature attributes will appear when the client CPU's BIOS
+ completes memory training and testing.
diff --git a/Documentation/i2c/slave-mqueue-backend.rst b/Documentation/i2c/slave-mqueue-backend.rst
new file mode 100644
index 000000000000..2d0d06d8df9d
--- /dev/null
+++ b/Documentation/i2c/slave-mqueue-backend.rst
@@ -0,0 +1,124 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+Linux I2C slave message queue backend
+=====================================
+
+:Author: Haiyue Wang <haiyue.wang@linux.intel.com>
+
+Some protocols over I2C/SMBus are designed for bi-directional message
+transfer using the I2C Master Write protocol. This requires that both sides
+of the communication have slave addresses.
+
+Protocols such as MCTP (Management Component Transport Protocol) and IPMB
+(Intelligent Platform Management Bus) both require that userspace can receive
+messages from I2C drivers operating in slave mode.
+
+This I2C slave mqueue (message queue) backend is used to receive and queue
+messages from the remote intelligent I2C device; it prepends the target
+slave address (with the R/W# bit always 0) as the first byte of the message,
+so that userspace can use this byte to dispatch the messages to different
+handling modules. Also, protocols like IPMB include the address byte in
+their message format and need it to compute the checksum.
+
+Because messages are time-sensitive, this backend flushes the oldest message
+to make room for the newest one.
+
+Link
+----
+`Intelligent Platform Management Bus
+Communications Protocol Specification
+<https://www.intel.com/content/dam/www/public/us/en/documents/product-briefs/ipmp-spec-v1.0.pdf>`_
+
+`Management Component Transport Protocol (MCTP)
+SMBus/I2C Transport Binding Specification
+<https://www.dmtf.org/sites/default/files/standards/documents/DSP0237_1.1.0.pdf>`_
+
+How to use
+----------
+For example, if the I2C5 bus has slave address 0x10 (the 0x1000 flag marks it
+as an own slave address), the command below creates the message queue interface::
+
+ echo slave-mqueue 0x1010 > /sys/bus/i2c/devices/i2c-5/new_device
+
+Then you can dump the messages like this::
+
+ hexdump -C /sys/bus/i2c/devices/5-1010/slave-mqueue
+
+Code Example
+------------
+*Note: call 'lseek' before 'read'; this is a requirement of the kernfs design.*
+
+::
+
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <unistd.h>
+ #include <poll.h>
+ #include <time.h>
+ #include <fcntl.h>
+ #include <stdio.h>
+
+ int main(int argc, char *argv[])
+ {
+ int i, r;
+ struct pollfd pfd;
+ struct timespec ts;
+ unsigned char data[256];
+
+ pfd.fd = open(argv[1], O_RDONLY | O_NONBLOCK);
+ if (pfd.fd < 0)
+ return -1;
+
+ pfd.events = POLLPRI;
+
+ while (1) {
+ r = poll(&pfd, 1, 5000);
+
+ if (r < 0)
+ break;
+
+ if (r == 0 || !(pfd.revents & POLLPRI))
+ continue;
+
+ lseek(pfd.fd, 0, SEEK_SET);
+ r = read(pfd.fd, data, sizeof(data));
+ if (r <= 0)
+ continue;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ printf("[%ld.%.9ld] :", ts.tv_sec, ts.tv_nsec);
+ for (i = 0; i < r; i++)
+ printf(" %02x", data[i]);
+ printf("\n");
+ }
+
+ close(pfd.fd);
+
+ return 0;
+ }
+
+Result
+------
+*./a.out "/sys/bus/i2c/devices/5-1010/slave-mqueue"*
+
+::
+
+ [10183.232500449] : 20 18 c8 2c 78 01 5b
+ [10183.479358348] : 20 18 c8 2c 78 01 5b
+ [10183.726556812] : 20 18 c8 2c 78 01 5b
+ [10183.972605863] : 20 18 c8 2c 78 01 5b
+ [10184.220124772] : 20 18 c8 2c 78 01 5b
+ [10184.467764166] : 20 18 c8 2c 78 01 5b
+ [10193.233421784] : 20 18 c8 2c 7c 01 57
+ [10193.480273460] : 20 18 c8 2c 7c 01 57
+ [10193.726788733] : 20 18 c8 2c 7c 01 57
+ [10193.972781945] : 20 18 c8 2c 7c 01 57
+ [10194.220487360] : 20 18 c8 2c 7c 01 57
+ [10194.468089259] : 20 18 c8 2c 7c 01 57
+ [10203.233433099] : 20 18 c8 2c 80 01 53
+ [10203.481058715] : 20 18 c8 2c 80 01 53
+ [10203.727610472] : 20 18 c8 2c 80 01 53
+ [10203.974044856] : 20 18 c8 2c 80 01 53
+ [10204.220734634] : 20 18 c8 2c 80 01 53
+ [10204.468461664] : 20 18 c8 2c 80 01 53
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 54ce34fd6fbd..91f4f7923359 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -111,6 +111,7 @@ needed).
iio/index
isdn/index
infiniband/index
+ jtag/index
leds/index
netlabel/index
networking/index
diff --git a/Documentation/jtag/index.rst b/Documentation/jtag/index.rst
new file mode 100644
index 000000000000..8a2761d1c17e
--- /dev/null
+++ b/Documentation/jtag/index.rst
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Joint Test Action Group (JTAG)
+==============================
+
+.. toctree::
+ :maxdepth: 1
+
+ jtag-summary
+ jtagdev
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/jtag/jtag-summary.rst b/Documentation/jtag/jtag-summary.rst
new file mode 100644
index 000000000000..050b16a9f801
--- /dev/null
+++ b/Documentation/jtag/jtag-summary.rst
@@ -0,0 +1,47 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================
+Linux kernel JTAG support
+====================================
+
+Introduction to JTAG
+====================
+
+JTAG is an industry standard for verifying hardware. JTAG provides access to
+many logic signals of a complex integrated circuit, including the device pins.
+
+A JTAG interface is a special interface added to a chip.
+Depending on the version of JTAG, two, four, or five pins are added.
+
+The connector pins are:
+ * TDI (Test Data In)
+ * TDO (Test Data Out)
+ * TCK (Test Clock)
+ * TMS (Test Mode Select)
+ * TRST (Test Reset) optional
+
+The JTAG interface is designed to have two parts - a basic core driver and a
+hardware-specific driver. The basic driver introduces a general interface
+which is not dependent on specific hardware. It provides communication
+between user space and the hardware-specific driver.
+Each JTAG device is represented as a char device (jtag0, jtag1, ...).
+Access to a JTAG device is performed through IOCTL calls.
+
+Call flow example:
+::
+
+ User: open -> /dev/jtagX -> JTAG core driver -> JTAG hardware specific driver
+ User: ioctl -> /dev/jtagX -> JTAG core driver -> JTAG hardware specific driver
+ User: close -> /dev/jtagX -> JTAG core driver -> JTAG hardware specific driver
+
+
+THANKS TO
+---------
+Contributors to Linux-JTAG discussions include (in alphabetical order,
+by last name):
+
+- Ernesto Corona
+- Steven Filary
+- Vadim Pasternak
+- Jiri Pirko
+- Oleksandr Shamray
diff --git a/Documentation/jtag/jtagdev.rst b/Documentation/jtag/jtagdev.rst
new file mode 100644
index 000000000000..c50ed2b85a07
--- /dev/null
+++ b/Documentation/jtag/jtagdev.rst
@@ -0,0 +1,207 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+JTAG userspace API
+==================
+JTAG master devices can be accessed through a character misc-device.
+
+Each JTAG master interface can be accessed by using /dev/jtagN.
+
+JTAG system calls set:
+ * SIR (Scan Instruction Register, IEEE 1149.1 Instruction Register scan);
+ * SDR (Scan Data Register, IEEE 1149.1 Data Register scan);
+ * RUNTEST (Forces the IEEE 1149.1 bus to a run state for a specified number of clocks).
+
+open(), close()
+---------------
+Open/Close device:
+::
+
+ jtag_fd = open("/dev/jtag0", O_RDWR);
+ close(jtag_fd);
+
+ioctl()
+-------
+All access operations to JTAG devices are performed through ioctl interface.
+The IOCTL interface supports these requests:
+::
+
+ JTAG_SIOCSTATE - Force JTAG state machine to go into a TAPC state
+ JTAG_SIOCFREQ - Set JTAG TCK frequency
+ JTAG_GIOCFREQ - Get JTAG TCK frequency
+ JTAG_IOCXFER - send/receive JTAG data Xfer
+ JTAG_GIOCSTATUS - get current JTAG TAP state
+ JTAG_SIOCMODE - set JTAG mode flags.
+ JTAG_IOCBITBANG - JTAG bitbang low level control.
+
+JTAG_SIOCFREQ
+~~~~~~~~~~~~~
+Set JTAG clock speed:
+::
+
+ unsigned int frq = 1000000;    /* desired TCK frequency in Hz */
+ ioctl(jtag_fd, JTAG_SIOCFREQ, &frq);
+
+JTAG_GIOCFREQ
+~~~~~~~~~~~~~
+Get JTAG clock speed:
+::
+
+ unsigned int frq;
+ ioctl(jtag_fd, JTAG_GIOCFREQ, &frq);
+
+JTAG_SIOCSTATE
+~~~~~~~~~~~~~~
+Force JTAG state machine to go into a TAPC state
+::
+
+ struct jtag_tap_state {
+ __u8 reset;
+ __u8 from;
+ __u8 endstate;
+ __u8 tck;
+ };
+
+reset: one of below options
+::
+
+ JTAG_NO_RESET - go through selected endstate from current state
+ JTAG_FORCE_RESET - go through TEST_LOGIC/RESET state before selected endstate
+
+endstate: any state listed in jtag_tapstate enum
+::
+
+ enum jtag_tapstate {
+ JTAG_STATE_TLRESET,
+ JTAG_STATE_IDLE,
+ JTAG_STATE_SELECTDR,
+ JTAG_STATE_CAPTUREDR,
+ JTAG_STATE_SHIFTDR,
+ JTAG_STATE_EXIT1DR,
+ JTAG_STATE_PAUSEDR,
+ JTAG_STATE_EXIT2DR,
+ JTAG_STATE_UPDATEDR,
+ JTAG_STATE_SELECTIR,
+ JTAG_STATE_CAPTUREIR,
+ JTAG_STATE_SHIFTIR,
+ JTAG_STATE_EXIT1IR,
+ JTAG_STATE_PAUSEIR,
+ JTAG_STATE_EXIT2IR,
+ JTAG_STATE_UPDATEIR
+ };
+
+tck: clock counter
+
+Example:
+::
+
+ struct jtag_tap_state tap_state;
+
+ tap_state.endstate = JTAG_STATE_IDLE;
+ tap_state.reset = 0;
+ tap_state.tck = data_p->tck;
+ usleep(25 * 1000);
+ ioctl(jtag_fd, JTAG_SIOCSTATE, &tap_state);
+
+JTAG_GIOCSTATUS
+~~~~~~~~~~~~~~~
+Get JTAG TAPC current machine state
+::
+
+ enum jtag_tapstate tapstate;
+
+ ioctl(jtag_fd, JTAG_GIOCSTATUS, &tapstate);
+
+JTAG_IOCXFER
+~~~~~~~~~~~~
+Send SDR/SIR transaction
+::
+
+ struct jtag_xfer {
+ __u8 type;
+ __u8 direction;
+ __u8 from;
+ __u8 endstate;
+ __u32 padding;
+ __u32 length;
+ __u64 tdio;
+ };
+
+type: transfer type - JTAG_SIR_XFER/JTAG_SDR_XFER
+
+direction: xfer direction - JTAG_READ_XFER/JTAG_WRITE_XFER/JTAG_READ_WRITE_XFER
+
+from: jtag_tapstate enum representing the initial tap state of the chain before xfer.
+
+endstate: end state after transaction finish any of jtag_tapstate enum
+
+padding: padding configuration. See the following table with bitfield descriptions.
+
+=============== ========= ======= =====================================================
+Bit Field Bit begin Bit end Description
+=============== ========= ======= =====================================================
+rsvd 25 31 Reserved, not used
+pad data 24 24 Value used for pre and post padding. Either 1 or 0.
+post pad count 12 23 Number of padding bits to be executed after transfer.
+pre pad count 0 11 Number of padding bits to be executed before transfer.
+=============== ========= ======= =====================================================
+
+length: xfer data length in bits
+
+tdio : xfer data array
+
+Example:
+::
+
+ struct jtag_xfer xfer;
+ static char buf[64];
+ static unsigned int buf_len = 0;
+ [...]
+ xfer.type = JTAG_SDR_XFER;
+ xfer.tdio = (__u64)buf;
+ xfer.length = buf_len;
+ xfer.from = JTAG_STATE_TLRESET;
+ xfer.endstate = JTAG_STATE_IDLE;
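+ /* Illustrative only: no pre/post padding and pad value 0, composed as
+  * described in the padding bitfield table above. */
+ xfer.padding = 0;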
+
+ if (is_read)
+ xfer.direction = JTAG_READ_XFER;
+ else if (is_write)
+ xfer.direction = JTAG_WRITE_XFER;
+ else
+ xfer.direction = JTAG_READ_WRITE_XFER;
+
+ ioctl(jtag_fd, JTAG_IOCXFER, &xfer);
+
+JTAG_SIOCMODE
+~~~~~~~~~~~~~
+If hardware driver can support different running modes you can change it.
+
+Example:
+::
+
+ struct jtag_mode mode;
+ mode.feature = JTAG_XFER_MODE;
+ mode.mode = JTAG_XFER_HW_MODE;
+ ioctl(jtag_fd, JTAG_SIOCMODE, &mode);
+
+JTAG_IOCBITBANG
+~~~~~~~~~~~~~~~
+JTAG Bitbang low level operation.
+
+Example:
+::
+
+ struct tck_bitbang bitbang;
+ bitbang.tms = 1;
+ bitbang.tdi = 0;
+ ioctl(jtag_fd, JTAG_IOCBITBANG, &bitbang);
+ tdo = bitbang.tdo;
+
+
+THANKS TO
+---------
+Contributors to Linux-JTAG discussions include (in alphabetical order,
+by last name):
+
+- Ernesto Corona
+- Jiri Pirko
diff --git a/Documentation/misc-devices/aspeed-espi-slave.rst b/Documentation/misc-devices/aspeed-espi-slave.rst
new file mode 100644
index 000000000000..887a69a7130a
--- /dev/null
+++ b/Documentation/misc-devices/aspeed-espi-slave.rst
@@ -0,0 +1,118 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========
+eSPI Slave
+==========
+
+:Author: Haiyue Wang <haiyue.wang@linux.intel.com>
+
+The PCH (**eSPI master**) provides the eSPI to support connection of a
+BMC (**eSPI slave**) to the platform.
+
+The LPC and eSPI interfaces are mutually exclusive. Both use the same
+pins, but on power-up, a HW strap determines if the eSPI or the LPC bus
+is operational. Once selected, it’s not possible to change to the other
+interface.
+
+``eSPI Channels and Supported Transactions``
+ +------+---------------------+----------------------+--------------------+
+ | CH # | Channel             | Posted Cycles        | Non-Posted Cycles  |
+ +======+=====================+======================+====================+
+ | 0    | Peripheral          | Memory Write,        | Memory Read,       |
+ |      |                     | Completions          | I/O Read/Write     |
+ +------+---------------------+----------------------+--------------------+
+ | 1    | Virtual Wire        | Virtual Wire GET/PUT | N/A                |
+ +------+---------------------+----------------------+--------------------+
+ | 2    | Out-of-Band Message | SMBus Packet GET/PUT | N/A                |
+ +------+---------------------+----------------------+--------------------+
+ | 3    | Flash Access        | N/A                  | Flash Read, Write, |
+ |      |                     |                      | Erase              |
+ +------+---------------------+----------------------+--------------------+
+ | N/A  | General             | Register Accesses    | N/A                |
+ +------+---------------------+----------------------+--------------------+
+
+Virtual Wire Channel (Channel 1) Overview
+-----------------------------------------
+
+The Virtual Wire channel uses a standard message format to communicate
+several types of signals between the components on the platform::
+
+ - Sideband and GPIO Pins: System events and other dedicated signals
+ between the PCH and eSPI slave. These signals are tunneled between the
+ two components over eSPI.
+
+ - Serial IRQ Interrupts: Interrupts are tunneled from the eSPI slave to
+ the PCH. Both edge and level triggered interrupts are supported.
+
+When the PCH runs in eSPI mode, the following VW messages are handled by
+the BMC firmware::
+
+ 1. SLAVE_BOOT_LOAD_DONE / SLAVE_BOOT_LOAD_STATUS
+ 2. SUS_ACK
+ 3. OOB_RESET_ACK
+ 4. HOST_RESET_ACK
+
+``eSPI Virtual Wires (VW)``
+ +----------------------+---------+---------------------------------------+
+ |Virtual Wire          |PCH Pin  |Comments                               |
+ |                      |Direction|                                       |
+ +======================+=========+=======================================+
+ |SUS_WARN#             |Output   |PCH pin is a GPIO when eSPI is enabled.|
+ |                      |         |eSPI controller receives as VW message.|
+ +----------------------+---------+---------------------------------------+
+ |SUS_ACK#              |Input    |PCH pin is a GPIO when eSPI is enabled.|
+ |                      |         |eSPI controller receives as VW message.|
+ +----------------------+---------+---------------------------------------+
+ |SLAVE_BOOT_LOAD_DONE  |Input    |Sent when the BMC has completed its    |
+ |                      |         |boot process as an indication to       |
+ |                      |         |eSPI-MC to continue with the G3 to S0  |
+ |                      |         |exit.                                  |
+ |                      |         |The eSPI Master waits for the assertion|
+ |                      |         |of this virtual wire before proceeding |
+ |                      |         |with the SLP_S5# deassertion.          |
+ |                      |         |The intent is that it is never changed |
+ |                      |         |except on a G3 exit - it is reset on a |
+ |                      |         |G3 entry.                              |
+ +----------------------+---------+---------------------------------------+
+ |SLAVE_BOOT_LOAD_STATUS|Input    |Sent upon completion of the Slave Boot |
+ |                      |         |Load from the attached flash. A status |
+ |                      |         |of 1 indicates that the boot code load |
+ |                      |         |was successful and that the integrity  |
+ |                      |         |of the image is intact.                |
+ +----------------------+---------+---------------------------------------+
+ |HOST_RESET_WARN       |Output   |Sent from the MC just before the Host  |
+ |                      |         |is about to enter reset. Upon receiving|
+ |                      |         |it, the BMC must flush and quiesce its |
+ |                      |         |upstream Peripheral Channel request    |
+ |                      |         |queues and assert HOST_RESET_ACK VWire.|
+ |                      |         |The MC subsequently completes any      |
+ |                      |         |outstanding posted transactions or     |
+ |                      |         |completions and then disables the      |
+ |                      |         |Peripheral Channel via a write to      |
+ |                      |         |the Slave's Configuration Register.    |
+ +----------------------+---------+---------------------------------------+
+ |HOST_RESET_ACK        |Input    |ACK for the HOST_RESET_WARN message    |
+ +----------------------+---------+---------------------------------------+
+ |OOB_RESET_WARN        |Output   |Sent from the MC just before the OOB   |
+ |                      |         |processor is about to enter reset. Upon|
+ |                      |         |receiving it, the BMC must flush and   |
+ |                      |         |quiesce its OOB Channel upstream       |
+ |                      |         |request queues and assert OOB_RESET_ACK|
+ |                      |         |VWire. The MC subsequently completes   |
+ |                      |         |any outstanding posted transactions or |
+ |                      |         |completions and then disables the OOB  |
+ |                      |         |Channel via a write to the Slave's     |
+ |                      |         |Configuration Register.                |
+ +----------------------+---------+---------------------------------------+
+ |OOB_RESET_ACK         |Input    |ACK for OOB_RESET_WARN message         |
+ +----------------------+---------+---------------------------------------+
+
+`Intel C620 Series Chipset Platform Controller Hub
+<https://www.intel.com/content/www/us/en/chipsets/c620-series-chipset-datasheet.html>`_
+
+ -- 17. Enhanced Serial Peripheral Interface
+
+
+`Enhanced Serial Peripheral Interface (eSPI)
+- Interface Base Specification (for Client and Server Platforms)
+<https://www.intel.com/content/dam/support/us/en/documents/software/chipset-software/327432-004_espi_base_specification_rev1.0.pdf>`_
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 6655d929a351..fa1a8521a09a 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -321,6 +321,7 @@ Code Seq# Include File Comments
<mailto:kenji@bitgate.com>
0xA1 0 linux/vtpm_proxy.h TPM Emulator Proxy Driver
0xA2 all uapi/linux/acrn.h ACRN hypervisor
+0xA3 00 include/uapi/linux/aspeed-lpc-mbox.h
0xA3 80-8F Port ACL in development:
<mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h
@@ -345,12 +346,16 @@ Code Seq# Include File Comments
<mailto:vgo@ratio.de>
0xB1 00-1F PPPoX
<mailto:mostrows@styx.uwaterloo.ca>
+0xB2 00-0F linux/jtag.h JTAG driver
+ <mailto:oleksandrs@mellanox.com>
0xB3 00 linux/mmc/ioctl.h
0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org>
0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org>
0xB6 all linux/fpga-dfl.h
0xB7 all uapi/linux/remoteproc_cdev.h <mailto:linux-remoteproc@vger.kernel.org>
0xB7 all uapi/linux/nsfs.h <mailto:Andrei Vagin <avagin@openvz.org>>
+0xB8 all uapi/linux/peci-ioctl.h PECI subsystem
+ <mailto:jae.hyun.yoo@linux.intel.com>
0xC0 00-0F linux/usb/iowarrior.h
0xCA 00-0F uapi/misc/cxl.h
0xCA 10-2F uapi/misc/ocxl.h
diff --git a/MAINTAINERS b/MAINTAINERS
index e25323549e54..03b126257619 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10081,6 +10081,17 @@ L: linux-serial@vger.kernel.org
S: Orphan
F: drivers/tty/serial/jsm/
+JTAG SUBSYSTEM
+M: Oleksandr Shamray <oleksandrs@mellanox.com>
+M: Vadim Pasternak <vadimp@mellanox.com>
+M: Ernesto Corona <ernesto.corona@intel.com>
+S: Maintained
+F: Documentation/ABI/testing/jtag-dev
+F: Documentation/devicetree/bindings/jtag/
+F: drivers/jtag/
+F: include/linux/jtag.h
+F: include/uapi/linux/jtag.h
+
K10TEMP HARDWARE MONITORING DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: linux-hwmon@vger.kernel.org
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-ast2500.dts b/arch/arm/boot/dts/aspeed-bmc-intel-ast2500.dts
new file mode 100644
index 000000000000..a460169e1b7b
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-ast2500.dts
@@ -0,0 +1,517 @@
+/dts-v1/;
+
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+
+/ {
+ model = "Intel AST2500 BMC";
+ compatible = "intel,ast2500-bmc", "aspeed,ast2500";
+
+ aliases {
+ serial4 = &uart5;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "console=ttyS4,115200 earlyprintk";
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vga_memory: framebuffer@7f000000 {
+ no-map;
+ reg = <0x7f000000 0x01000000>;
+ };
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ video_engine_memory: jpegbuffer {
+ size = <0x02000000>; /* 32M */
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ ramoops@9eff0000{
+ compatible = "ramoops";
+ reg = <0x9eff0000 0x10000>;
+ record-size = <0x2000>;
+ console-size = <0x2000>;
+ };
+ };
+
+ vga-shared-memory {
+ compatible = "aspeed,ast2500-vga-sharedmem";
+ reg = <0x9ff00000 0x100000>;
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>,
+ <&adc 8>, <&adc 9>, <&adc 10>, <&adc 11>,
+ <&adc 12>, <&adc 13>, <&adc 14>, <&adc 15>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ identify {
+ default-state = "on";
+ gpios = <&gpio ASPEED_GPIO(S, 6) GPIO_ACTIVE_LOW>;
+ };
+
+ status_amber {
+ default-state = "off";
+ gpios = <&gpio ASPEED_GPIO(S, 5) GPIO_ACTIVE_LOW>;
+ };
+
+ status_green {
+ default-state = "keep";
+ gpios = <&gpio ASPEED_GPIO(S, 4) GPIO_ACTIVE_LOW>;
+ };
+
+ fan1_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(C, 4) GPIO_ACTIVE_HIGH>;
+ };
+
+ fan2_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(C, 5) GPIO_ACTIVE_HIGH>;
+ };
+
+ fan3_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(C, 6) GPIO_ACTIVE_HIGH>;
+ };
+
+ fan4_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(C, 7) GPIO_ACTIVE_HIGH>;
+ };
+
+ fan5_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
+ };
+
+ fan6_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(D, 1) GPIO_ACTIVE_HIGH>;
+ };
+
+ fan7_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(D, 2) GPIO_ACTIVE_HIGH>;
+ };
+
+ fan8_fault {
+ default-state = "off";
+ gpios = <&sgpio ASPEED_GPIO(D, 3) GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ beeper {
+ compatible = "pwm-beeper";
+ pwms = <&timer 6 1000000 0>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+#include "openbmc-flash-layout-intel-64MB.dtsi"
+ };
+};
+
+&espi {
+ status = "okay";
+};
+
+&jtag {
+ status = "okay";
+};
+
+&peci0 {
+ status = "okay";
+ gpios = <&gpio ASPEED_GPIO(F, 6) 0>;
+};
+
+&syscon {
+ uart-clock-high-speed;
+ status = "okay";
+
+ misc_control {
+ compatible = "aspeed,bmc-misc";
+ uart_port_debug {
+ offset = <0x2c>;
+ bit-mask = <0x1>;
+ bit-shift = <10>;
+ read-only;
+ };
+ p2a-bridge {
+ offset = <0x180>;
+ bit-mask = <0x1>;
+ bit-shift = <1>;
+ read-only;
+ };
+ boot-2nd-flash {
+ offset = <0x70>;
+ bit-mask = <0x1>;
+ bit-shift = <17>;
+ read-only;
+ };
+ chip_id {
+ offset = <0x150>;
+ bit-mask = <0x0fffffff 0xffffffff>;
+ bit-shift = <0>;
+ read-only;
+ reg-width = <64>;
+ hash-data = "d44f9b804976fa23c2e25d62f16154d26520a7e24c5555095fd1b55c027804f1570dcd16189739c640cd7d9a6ce14944a2c4eaf1dc429eed6940e8a83498a474";
+ };
+ silicon_id {
+ offset = <0x7c>;
+ bit-mask = <0xffffffff>;
+ bit-shift = <0>;
+ read-only;
+ reg-width = <32>;
+ };
+ };
+};
+
+&adc {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "FM_BMC_BOARD_SKU_ID0_N","FM_BMC_BOARD_SKU_ID1_N","FM_BMC_BOARD_SKU_ID2_N","FM_BMC_BOARD_SKU_ID3_N","FM_BMC_BOARD_SKU_ID4_N","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "RESET_BUTTON","RESET_OUT","POWER_BUTTON","POWER_OUT","","DEBUG_EN_N","","",
+ /*F0-F7*/ "NMI_OUT","","","","CPU_ERR0","CPU_ERR1","PLTRST_N","PRDY_N",
+ /*G0-G7*/ "CPU_ERR2","CPU_CATERR","PCH_BMC_THERMTRIP","LCP_ENTER_BUTTON","LCP_LEFT_BUTTON","FM_BMC_BOARD_SKU_ID5_N","","",
+ /*H0-H7*/ "","","","FM_NODE_ID_1","FM_NODE_ID_2","FM_NODE_ID_3","FM_NODE_ID_4","FM_240VA_STATUS",
+ /*I0-I7*/ "FM_SYS_FAN0_PRSNT_D_N","FM_SYS_FAN1_PRSNT_D_N","FM_SYS_FAN2_PRSNT_D_N","FM_SYS_FAN3_PRSNT_D_N","FM_SYS_FAN4_PRSNT_D_N","FM_SYS_FAN5_PRSNT_D_N","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","PWR_DEBUG_N",
+ /*R0-R7*/ "","XDP_PRST_N","","","","","","CHASSIS_INTRUSION",
+ /*S0-S7*/ "REMOTE_DEBUG_ENABLE","SYSPWROK","RSMRST_N","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "SIO_S3","SIO_S5","","SIO_ONCONTROL","","","","",
+ /*Z0-Z7*/ "","SIO_POWER_GOOD","","","","","","",
+ /*AA0-AA7*/ "P3VBAT_BRIDGE_EN","","","","PREQ_N","TCK_MUX_SEL","SMI","POST_COMPLETE",
+ /*AB0-AB7*/ "","NMI_BUTTON","ID_BUTTON","PS_PWROK","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+};
+
+&sgpio {
+ ngpios = <80>;
+ bus-frequency = <2000000>;
+ status = "okay";
+ /* SGPIO lines. even: input, odd: output */
+ gpio-line-names =
+ /*A0-A7*/ "CPU1_PRESENCE","","CPU1_THERMTRIP","","CPU1_VRHOT","","CPU1_FIVR_FAULT","","CPU1_MEM_ABCD_VRHOT","","CPU1_MEM_EFGH_VRHOT","","","","","",
+ /*B0-B7*/ "CPU1_MISMATCH","LED_CPU1_CH1_DIMM1_FAULT","CPU1_MEM_THERM_EVENT","LED_CPU1_CH1_DIMM2_FAULT","CPU2_PRESENCE","LED_CPU1_CH2_DIMM1_FAULT","CPU2_THERMTRIP","LED_CPU1_CH2_DIMM2_FAULT","CPU2_VRHOT","LED_CPU1_CH3_DIMM1_FAULT","CPU2_FIVR_FAULT","LED_CPU1_CH3_DIMM2_FAULT","CPU2_MEM_ABCD_VRHOT","LED_CPU1_CH4_DIMM1_FAULT","CPU2_MEM_EFGH_VRHOT","LED_CPU1_CH4_DIMM2_FAULT",
+ /*C0-C7*/ "","LED_CPU1_CH5_DIMM1_FAULT","","LED_CPU1_CH5_DIMM2_FAULT","CPU2_MISMATCH","LED_CPU1_CH6_DIMM1_FAULT","CPU2_MEM_THERM_EVENT","LED_CPU1_CH6_DIMM2_FAULT","","LED_FAN1_FAULT","","LED_FAN2_FAULT","","LED_FAN3_FAULT","","LED_FAN4_FAULT",
+ /*D0-D7*/ "","LED_FAN5_FAULT","","LED_FAN6_FAULT","","LED_FAN7_FAULT","","LED_FAN8_FAULT","","LED_CPU2_CH1_DIMM1_FAULT","","LED_CPU2_CH1_DIMM2_FAULT","","LED_CPU2_CH2_DIMM1_FAULT","","LED_CPU2_CH2_DIMM2_FAULT",
+ /*E0-E7*/ "","LED_CPU2_CH3_DIMM1_FAULT","","LED_CPU2_CH3_DIMM2_FAULT","","LED_CPU2_CH4_DIMM1_FAULT","","LED_CPU2_CH4_DIMM2_FAULT","","LED_CPU2_CH5_DIMM1_FAULT","","LED_CPU2_CH5_DIMM2_FAULT","","LED_CPU2_CH6_DIMM1_FAULT","","LED_CPU2_CH6_DIMM2_FAULT",
+ /*F0-F7*/ "SGPIO_PLD_MINOR_REV_BIT0","LED_CPU3_CH1_DIMM1_FAULT","SGPIO_PLD_MINOR_REV_BIT1","LED_CPU3_CH1_DIMM2_FAULT","SGPIO_PLD_MINOR_REV_BIT2","LED_CPU3_CH2_DIMM1_FAULT","SGPIO_PLD_MINOR_REV_BIT3","LED_CPU3_CH2_DIMM2_FAULT","SGPIO_PLD_MAJOR_REV_BIT0","LED_CPU3_CH3_DIMM1_FAULT","SGPIO_PLD_MAJOR_REV_BIT1","LED_CPU3_CH3_DIMM2_FAULT","SGPIO_PLD_MAJOR_REV_BIT2","LED_CPU3_CH4_DIMM1_FAULT","SGPIO_PLD_MAJOR_REV_BIT3","LED_CPU3_CH4_DIMM2_FAULT",
+ /*G0-G7*/ "MAIN_PLD_MINOR_REV_BIT0","LED_CPU3_CH5_DIMM1_FAULT","MAIN_PLD_MINOR_REV_BIT1","LED_CPU3_CH5_DIMM2_FAULT","MAIN_PLD_MINOR_REV_BIT2","LED_CPU3_CH6_DIMM1_FAULT","MAIN_PLD_MINOR_REV_BIT3","LED_CPU3_CH6_DIMM2_FAULT","MAIN_PLD_MAJOR_REV_BIT0","LED_CPU4_CH1_DIMM1_FAULT","MAIN_PLD_MAJOR_REV_BIT1","LED_CPU4_CH1_DIMM2_FAULT","MAIN_PLD_MAJOR_REV_BIT2","LED_CPU4_CH2_DIMM1_FAULT","MAIN_PLD_MAJOR_REV_BIT3","LED_CPU4_CH2_DIMM2_FAULT",
+ /*H0-H7*/ "","LED_CPU4_CH3_DIMM1_FAULT","","LED_CPU4_CH3_DIMM2_FAULT","","LED_CPU4_CH4_DIMM1_FAULT","","LED_CPU4_CH4_DIMM2_FAULT","","LED_CPU4_CH5_DIMM1_FAULT","","LED_CPU4_CH5_DIMM2_FAULT","","LED_CPU4_CH6_DIMM1_FAULT","","LED_CPU4_CH6_DIMM2_FAULT",
+ /*I0-I7*/ "","","","","","","","","","","","","","","","",
+ /*J0-J7*/ "","","","","","","","","","","","","","","","";
+};
+
+&kcs3 {
+ aspeed,lpc-io-reg = <0xCA2>;
+ status = "okay";
+};
+
+&kcs4 {
+ aspeed,lpc-io-reg = <0xCA4>;
+ status = "okay";
+};
+
+&sio_regs {
+ status = "okay";
+ sio_status {
+ offset = <0x10C>;
+ bit-mask = <0x1F>;
+ bit-shift = <4>;
+ };
+};
+
+&lpc_sio {
+ status = "okay";
+};
+
+&lpc_snoop {
+ snoop-ports = <0x80>;
+ status = "okay";
+};
+
+&mbox {
+ status = "okay";
+};
+
+&uart_routing {
+ status = "okay";
+};
+
+/**
+ * SAFS through SPI1 is available only on Wilson Point.
+ * These pins are used as fan presence checking gpios in WFP
+ * so commenting it out for now.
+ * &spi1 {
+ * status = "okay";
+ *
+ * flash@0 {
+ * m25p,fast-read;
+ * status = "okay";
+ * };
+ *};
+ */
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default
+ &pinctrl_nrts1_default
+ &pinctrl_ndtr1_default
+ &pinctrl_ndsr1_default
+ &pinctrl_ncts1_default
+ &pinctrl_ndcd1_default
+ &pinctrl_nri1_default>;
+};
+
+&uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd2_default
+ &pinctrl_rxd2_default
+ &pinctrl_nrts2_default
+ &pinctrl_ndtr2_default
+ &pinctrl_ndsr2_default
+ &pinctrl_ncts2_default
+ &pinctrl_ndcd2_default
+ &pinctrl_nri2_default>;
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&mac1 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
+};
+
+&mac0 {
+ status = "okay";
+ use-ncsi;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii1_default>;
+};
+
+&i2c0 {
+ multi-master;
+ general-call;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c1 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c2 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c3 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+
+ rtc-pch@44 {
+ compatible = "rtc,pchc620";
+ reg = <0x44>;
+ };
+};
+
+&i2c4 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c5 {
+ bus-frequency = <1000000>;
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c6 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c7 {
+ multi-master;
+ #retries = <3>;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c9 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c11 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c13 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&gfx {
+ status = "okay";
+ memory-region = <&gfx_memory>;
+};
+
+&vuart {
+ status = "okay";
+};
+
+&pwm_tacho {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default
+ &pinctrl_pwm2_default &pinctrl_pwm3_default
+ &pinctrl_pwm4_default &pinctrl_pwm5_default
+ &pinctrl_pwm6_default &pinctrl_pwm7_default>;
+
+ fan@0 {
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02 0x03>;
+ };
+ fan@2 {
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x04 0x05>;
+ };
+ fan@3 {
+ reg = <0x03>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x06 0x07>;
+ };
+ fan@4 {
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x08 0x09>;
+ };
+ fan@5 {
+ reg = <0x05>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0A 0x0B>;
+ };
+ fan@6 {
+ reg = <0x06>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0C 0x0D>;
+ };
+ fan@7 {
+ reg = <0x07>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0E 0x0F>;
+ };
+
+};
+
+&timer {
+/*
+ * Available settings:
+ * fttmr010,pwm-outputs = <5>, <6>, <7>, <8>;
+ * pinctrl-0 = <&pinctrl_timer5_default &pinctrl_timer6_default
+ * &pinctrl_timer7_default &pinctrl_timer8_default>;
+ */
+ fttmr010,pwm-outputs = <6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_timer6_default>;
+ #pwm-cells = <3>;
+ status = "okay";
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&vhub {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-ast2600.dts b/arch/arm/boot/dts/aspeed-bmc-intel-ast2600.dts
new file mode 100644
index 000000000000..11af2ae99f67
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-ast2600.dts
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+#include "aspeed-g6.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+
+/ {
+ model = "AST2600 EVB";
+ compatible = "aspeed,ast2600";
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "console=ttyS4,115200n8 root=/dev/ram rw init=/linuxrc earlyprintk";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ video_engine_memory: jpegbuffer {
+ size = <0x02000000>; /* 32M */
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ video_memory: video {
+ size = <0x04000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ no-map;
+ };
+ };
+
+ vga-shared-memory {
+ compatible = "aspeed,ast2500-vga-sharedmem";
+ reg = <0x9f700000 0x100000>;
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
+ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
+ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>,
+ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ identify {
+ default-state = "off";
+ gpios = <&gpio0 ASPEED_GPIO(B, 7) GPIO_ACTIVE_LOW>;
+ };
+
+ status_amber {
+ default-state = "off";
+ gpios = <&gpio0 ASPEED_GPIO(G, 3) GPIO_ACTIVE_LOW>;
+ };
+
+ status_green {
+ default-state = "keep";
+ gpios = <&gpio0 ASPEED_GPIO(G, 2) GPIO_ACTIVE_LOW>;
+ };
+
+ status_susack {
+ default-state = "off";
+ gpios = <&gpio0 ASPEED_GPIO(V, 6) GPIO_ACTIVE_LOW>;
+ };
+
+ fan1_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 41 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan2_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 43 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan3_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 45 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan4_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 47 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan5_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 49 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan6_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 51 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan7_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 53 GPIO_ACTIVE_HIGH>;
+ };
+
+ fan8_fault {
+ default-state = "off";
+ gpios = <&sgpiom0 55 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ beeper {
+ compatible = "pwm-beeper";
+ pwms = <&pwm_tacho 7 1000000 0>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <4>;
+ m25p,fast-read;
+#include "openbmc-flash-layout-intel-64MB.dtsi"
+ };
+};
+
+&espi {
+ status = "okay";
+ oob,dma-mode;
+ oob,dma-tx-desc-num = <0x2>;
+ oob,dma-rx-desc-num = <0x8>;
+};
+
+&mctp {
+ status = "okay";
+};
+
+&peci0 {
+ status = "okay";
+ gpios = <&gpio0 ASPEED_GPIO(F, 6) 0>;
+};
+
+&syscon {
+ uart-clock-high-speed;
+ status = "okay";
+
+ misc_control {
+ compatible = "aspeed,bmc-misc";
+ uart_port_debug {
+ offset = <0xc8>;
+ bit-mask = <0x1>;
+ bit-shift = <1>;
+ read-only;
+ };
+
+ uart1_port_debug {
+ offset = <0xd8>;
+ bit-mask = <0x1>;
+ bit-shift = <3>;
+ read-only;
+ };
+
+ p2a-bridge {
+ offset = <0xC20>;
+ bit-mask = <0x1>;
+ bit-shift = <1>;
+ read-only;
+ };
+
+ boot-2nd-flash {
+ offset = <0x500>;
+ bit-mask = <0x1>;
+ bit-shift = <31>;
+ read-only;
+ };
+
+ chip_id {
+ offset = <0x5b0>;
+ bit-mask = <0xffffffff 0xffffffff>;
+ bit-shift = <0>;
+ read-only;
+ reg-width = <64>;
+ hash-data = "d44f9b804976fa23c2e25d62f16154d26520a7e24c5555095fd1b55c027804f1570dcd16189739c640cd7d9a6ce14944a2c4eaf1dc429eed6940e8a83498a474";
+ };
+ silicon_id {
+ offset = <0x14>;
+ bit-mask = <0xffffffff>;
+ bit-shift = <0>;
+ read-only;
+ reg-width = <32>;
+ };
+ };
+};
+
+#if 0
+ GPIO Alias: (runtime alias -> schematic name)
+ ID_BUTTON -> FP_ID_BTN_N
+ CPU_CATERR -> FM_PLT_BMC_THERMTRIP_N
+ PCH_BMC_THERMTRIP -> FM_PLT_BMC_THERMTRIP_N
+ RESET_BUTTON -> FP_BMC_RST_BTN_N
+ RESET_OUT -> RST_BMC_RSTBTN_OUT_R_N
+ POWER_BUTTON -> FP_BMC_PWR_BTN_R_N
+ POWER_OUT -> FM_BMC_PWR_BTN_N
+ PREQ_N -> DBP_ASD_BMC_PREQ_R_N
+ POST_COMPLETE -> FM_BIOS_POST_CMPLT_BMC_N
+ CPU_ERR0 -> FM_CPU_ERR0_LVT3_N
+ CPU_ERR1 -> FM_CPU_ERR1_LVT3_N
+ CPU_ERR2 -> FM_CPU_ERR2_LVT3_N
+ DEBUG_EN_N -> FM_JTAG_TCK_MUX_SEL_R
+ NMI_OUT -> IRQ_BMC_CPU_NMI_R
+ PLTRST_N -> RST_PLTRST_BMC_N
+ PRDY_N -> DBP_ASD_BMC_PRDY_R_N
+ PWR_DEBUG_N ->
+ XDP_PRST_N ->
+ SYSPWROK ->
+ RSMRST_N ->
+ SIO_S3 -> FM_SLPS3_R_N
+ SIO_S5 -> FM_SLPS4_R_N
+ SIO_ONCONTROL -> FM_BMC_ONCTL_R_N
+ SIO_POWER_GOOD -> PWRGD_CPU0_LVC3_R
+ PS_PWROK -> PWRGD_BMC_PS_PWROK_R
+ P3VBAT_BRIDGE_EN ->
+ TCK_MUX_SEL ->
+ SMI -> IRQ_SMI_ACTIVE_BMC_N
+ NMI_BUTTON -> FP_NMI_BTN_N
+#endif
+&gpio0 {
+ status = "okay";
+ gpio-line-names =
+ /*A0-A7*/ "","","","","SMB_CPU_PIROM_SCL","SMB_CPU_PIROM_SDA","SMB_IPMB_STBY_LVC3_R_SCL","SMB_IPMB_STBY_LVC3_R_SDA",
+ /*B0-B7*/ "FM_1200VA_OC","NMI_OUT","IRQ_SMB3_M2_ALERT_N","","RGMII_BMC_RMM4_LVC3_R_MDC","RGMII_BMC_RMM4_LVC3_R_MDIO","FM_BMC_BMCINIT_R","FP_ID_LED_N",
+ /*C0-C7*/ "FM_FORCE_BMC_UPDATE_N","RST_RGMII_PHYRST_N","FM_TPM_EN_PULSE","FM_BMC_CRASHLOG_TRIG_N","IRQ_BMC_PCH_NMI_R","FM_CPU1_DISABLE_COD_N","FM_4S_8S_N_MODE","FM_STANDALONE_MODE_N",
+ /*D0-D7*/ "CPU_ERR0","CPU_ERR1","CPU_ERR2","PRDY_N","FM_SPD_SWITCH_CTRL_N","","","",
+ /*E0-E7*/ "FM_SKT1_FAULT_LED","FM_SKT0_FAULT_LED","CLK_50M_CKMNG_BMCB","FM_BMC_BOARD_REV_ID2_N","","","","",
+ /*F0-F7*/ "FM_BMC_BOARD_SKU_ID0_N","FM_BMC_BOARD_SKU_ID1_N","FM_BMC_BOARD_SKU_ID2_N","FM_BMC_BOARD_SKU_ID3_N","FM_BMC_BOARD_SKU_ID4_N","FM_BMC_BOARD_SKU_ID5_N","ID_BUTTON","PS_PWROK",
+ /*G0-G7*/ "FM_SMB_BMC_NVME_LVC3_ALERT_N","RST_BMC_I2C_M2_R_N","FP_LED_STATUS_GREEN_N","FP_LED_STATUS_AMBER_N","FM_BMC_BOARD_REV_ID0_N","FM_BMC_BOARD_REV_ID1_N","FM_BMC_CPU_FBRK_OUT_R_N","DBP_PRESENT_IN_R2_N",
+ /*H0-H7*/ "SGPIO_BMC_CLK_R","SGPIO_BMC_LD_R","SGPIO_BMC_DOUT_R","SGPIO_BMC_DIN","PLTRST_N","CPU_CATERR","PCH_BMC_THERMTRIP","",
+ /*I0-I7*/ "JTAG_ASD_NTRST_R_N","JTAG_ASD_TDI_R","JTAG_ASD_TCK_R","JTAG_ASD_TMS_R","JTAG_ASD_TDO","FM_BMC_PWRBTN_OUT_R_N","FM_BMC_PWR_BTN_N","",
+ /*J0-J7*/ "SMB_CHASSENSOR_STBY_LVC3_SCL","SMB_CHASSENSOR_STBY_LVC3_SDA","FM_NODE_ID0","FM_NODE_ID1","","","","",
+ /*K0-K7*/ "SMB_HSBP_STBY_LVC3_R_SCL","SMB_HSBP_STBY_LVC3_R_SDA","SMB_SMLINK0_STBY_LVC3_R2_SCL","SMB_SMLINK0_STBY_LVC3_R2_SDA","SMB_TEMPSENSOR_STBY_LVC3_R_SCL","SMB_TEMPSENSOR_STBY_LVC3_R_SDA","SMB_PMBUS_SML1_STBY_LVC3_R_SCL","SMB_PMBUS_SML1_STBY_LVC3_R_SDA",
+ /*L0-L7*/ "SMB_PCIE_STBY_LVC3_R_SCL","SMB_PCIE_STBY_LVC3_R_SDA","SMB_HOST_STBY_BMC_LVC3_R_SCL","SMB_HOST_STBY_BMC_LVC3_R_SDA","PREQ_N","TCK_MUX_SEL","V_BMC_GFX_HSYNC_R","V_BMC_GFX_VSYNC_R",
+ /*M0-M7*/ "SPA_CTS_N","SPA_DCD_N","SPA_DSR_N","PU_SPA_RI_N","SPA_DTR_N","SPA_RTS_N","SPA_SOUT","SPA_SIN",
+ /*N0-N7*/ "SPB_CTS_N","SPB_DCD_N","SPB_DSR_N","PU_SPB_RI_N","SPB_DTR_N","SPB_RTS_N","SPB_SOUT","SPB_SIN",
+ /*O0-O7*/ "FAN_BMC_PWM0","FAN_BMC_PWM1","FAN_BMC_PWM2","FAN_BMC_PWM3","FAN_BMC_PWM4","FAN_BMC_PWM5","NMI_BUTTON","SPEAKER_BMC_R",
+ /*P0-P7*/ "RESET_BUTTON","RESET_OUT","POWER_BUTTON","POWER_OUT","FAN_BMC_PWM6","FAN_BMC_PWM7","FAN_BMC_PWM8","FAN_BMC_PWM9",
+ /*Q0-Q7*/ "FAN_BMC_TACH0","FAN_BMC_TACH1","FAN_BMC_TACH2","FAN_BMC_TACH3","FAN_BMC_TACH4","FAN_BMC_TACH5","FAN_BMC_TACH6","FAN_BMC_TACH7",
+ /*R0-R7*/ "FAN_BMC_TACH8","FAN_BMC_TACH9","","","","","","",
+ /*S0-S7*/ "RST_BMC_PCIE_MUX_N","FM_BMC_EUP_LOT6_N","","","","A_P3V_BAT_SCALED_EN","REMOTE_DEBUG_ENABLE","FM_PCHHOT_N",
+ /*T0-T7*/ "A_P12V_PSU_SCALED","A_P12V_AUX_SCALED","A_P3V3_SCALED","A_P5V_SCALED","A_PVNN_PCH_AUX_SENSOR","A_P1V05_PCH_AUX_SENSOR","A_P1V8_AUX_SENSOR","A_P3V_BAT_SCALED",
+ /*U0-U7*/ "A_PVCCIN_CPU0_SENSOR","A_PVCCIN_CPU1_SENSOR","A_PVCCINFAON_CPU0_SENSOR","A_PVCCINFAON_CPU1_SENSOR","A_PVCCFA_EHV_FIVRA_CPU0_SENSOR","A_PVCCFA_EHV_FIVRA_CPU1_SENSOR","A_PVCCD_HV_CPU0_SENSOR","A_PVCCD_HV_CPU1_SENSOR",
+ /*V0-V7*/ "SIO_S3","SIO_S5","TP_BMC_SIO_PWREQ_N","SIO_ONCONTROL","SIO_POWER_GOOD","LED_BMC_HB_LED_N","FM_BMC_SUSACK_N","",
+ /*W0-W7*/ "LPC_LAD0_ESPI_R_IO0","LPC_LAD1_ESPI_R_IO1","LPC_LAD2_ESPI_R_IO2","LPC_LAD3_ESPI_R_IO3","CLK_24M_66M_LPC0_ESPI_BMC","LPC_LFRAME_N_ESPI_CS0_BMC_N","IRQ_LPC_SERIRQ_ESPI_ALERT_N","RST_LPC_LRST_ESPI_RST_BMC_R_N",
+ /*X0-X7*/ "","SMI","POST_COMPLETE","","","","","",
+ /*Y0-Y7*/ "","IRQ_SML0_ALERT_BMC_R2_N","","IRQ_SML1_PMBUS_BMC_ALERT_N","SPI_BMC_BOOT_R_IO2","SPI_BMC_BOOT_R_IO3","PU_SPI_BMC_BOOT_ABR","PU_SPI_BMC_BOOT_WP_N",
+ /*Z0-Z7*/ "PWRGD_P3V3_RISER1","PWRGD_P3V3_RISER2","","HW_STRAP_5","HW_STRAP_6","HW_STRAP_7","HW_STRAP_2","HW_STRAP_3";
+};
+
+&gpio1 {
+ status = "okay";
+ gpio-line-names = /* GPIO18 A-E */
+ /*A0-A7*/ "","","RST_EMMC_BMC_R_N","FM_SYS_FAN6_PRSNT_D_N","FM_SYS_FAN0_PRSNT_D_N","FM_SYS_FAN1_PRSNT_D_N","FM_SYS_FAN2_PRSNT_D_N","FM_SYS_FAN3_PRSNT_D_N",
+ /*B0-B7*/ "FM_SYS_FAN4_PRSNT_D_N","FM_SYS_FAN5_PRSNT_D_N","","FM_SYS_FAN7_PRSNT_D_N","RGMII_BMC_RMM4_TX_R_CLK","RGMII_BMC_RMM4_TX_R_CTRL","RGMII_BMC_RMM4_R_TXD0","RGMII_BMC_RMM4_R_TXD1",
+ /*C0-C7*/ "RGMII_BMC_RMM4_R_TXD2","RGMII_BMC_RMM4_R_TXD3","RGMII_BMC_RMM4_RX_CLK","RGMII_BMC_RMM4_RX_CTRL","RGMII_BMC_RMM4_RXD0","RGMII_BMC_RMM4_RXD1","RGMII_BMC_RMM4_RXD2","RGMII_BMC_RMM4_RXD3",
+ /*D0-D7*/ "EMMC_BMC_R_CLK","EMMC_BMC_R_CMD","EMMC_BMC_R_DATA0","EMMC_BMC_R_DATA1","EMMC_BMC_R_DATA2","EMMC_BMC_R_DATA3","EMMC_BMC_CD_N","EMMC_BMC_WP_N",
+ /*E0-E3*/ "EMMC_BMC_R_DATA4","EMMC_BMC_R_DATA5","EMMC_BMC_R_DATA6","EMMC_BMC_R_DATA7";
+};
+
+&sgpiom0 {
+ ngpios = <80>;
+ bus-frequency = <2000000>;
+ status = "okay";
+#if 0
+ SGPIO Alias: (runtime alias -> net name)
+ CPU1_PRESENCE -> FM_CPU0_SKTOCC_LVT3_N
+ CPU1_THERMTRIP -> H_CPU0_THERMTRIP_LVC1_N
+ CPU1_VRHOT -> IRQ_CPU0_VRHOT_N
+ CPU1_FIVR_FAULT -> H_CPU0_MON_FAIL_LVC1_N
+ CPU1_MEM_VRHOT -> IRQ_CPU0_MEM_VRHOT_N
+ CPU1_MEM_THERM_EVENT -> H_CPU0_MEMHOT_OUT_LVC1_N
+ CPU1_MISMATCH -> FM_CPU0_MISMATCH
+ CPU2_PRESENCE -> FM_CPU1_SKTOCC_LVT3_N
+ CPU2_THERMTRIP -> H_CPU1_THERMTRIP_LVC1_N
+ CPU2_VRHOT -> IRQ_CPU1_VRHOT_N
+ CPU2_FIVR_FAULT -> H_CPU1_MON_FAIL_LVC1_N
+ CPU2_MEM_VRHOT -> IRQ_CPU1_MEM_VRHOT_N
+ CPU2_MEM_THERM_EVENT -> H_CPU1_MEMHOT_OUT_LVC1_N
+ CPU2_MISMATCH -> FM_CPU1_MISMATCH
+#endif
+ /* SGPIO lines. even: input, odd: output */
+ gpio-line-names =
+ /*A0-A7*/ "CPU1_PRESENCE","","CPU1_THERMTRIP","","CPU1_VRHOT","","CPU1_FIVR_FAULT","","CPU1_MEM_VRHOT","","CPU1_MEM_THERM_EVENT","","FM_CPU0_PROC_ID0","","FM_CPU0_PROC_ID1","",
+ /*B0-B7*/ "CPU1_MISMATCH","LED_CPU1_CH1_DIMM1_FAULT","","LED_CPU1_CH1_DIMM2_FAULT","CPU2_PRESENCE","LED_CPU1_CH2_DIMM1_FAULT","CPU2_THERMTRIP","LED_CPU1_CH2_DIMM2_FAULT","CPU2_VRHOT","LED_CPU1_CH3_DIMM1_FAULT","CPU2_FIVR_FAULT","LED_CPU1_CH3_DIMM2_FAULT","CPU2_MEM_VRHOT","LED_CPU1_CH4_DIMM1_FAULT","CPU2_MEM_THERM_EVENT","LED_CPU1_CH4_DIMM2_FAULT",
+ /*C0-C7*/ "FM_CPU1_PROC_ID0","LED_CPU1_CH5_DIMM1_FAULT","FM_CPU1_PROC_ID1","LED_CPU1_CH5_DIMM2_FAULT","CPU2_MISMATCH","LED_CPU1_CH6_DIMM1_FAULT","","LED_CPU1_CH6_DIMM2_FAULT","","LED_FAN1_FAULT","","LED_FAN2_FAULT","","LED_FAN3_FAULT","","LED_FAN4_FAULT",
+ /*D0-D7*/ "","LED_FAN5_FAULT","","LED_FAN6_FAULT","","LED_FAN7_FAULT","","LED_FAN8_FAULT","","LED_CPU2_CH1_DIMM1_FAULT","","LED_CPU2_CH1_DIMM2_FAULT","","LED_CPU2_CH2_DIMM1_FAULT","","LED_CPU2_CH2_DIMM2_FAULT",
+ /*E0-E7*/ "","LED_CPU2_CH3_DIMM1_FAULT","","LED_CPU2_CH3_DIMM2_FAULT","","LED_CPU2_CH4_DIMM1_FAULT","","LED_CPU2_CH4_DIMM2_FAULT","","LED_CPU2_CH5_DIMM1_FAULT","","LED_CPU2_CH5_DIMM2_FAULT","","LED_CPU2_CH6_DIMM1_FAULT","","LED_CPU2_CH6_DIMM2_FAULT",
+ /*F0-F7*/ "CPU1_CPLD_CRC_ERROR","","CPU2_CPLD_CRC_ERROR","","","","","","","","","","","","","",
+ /*G0-G7*/ "MAIN_PLD_MINOR_REV_BIT0","","MAIN_PLD_MINOR_REV_BIT1","","MAIN_PLD_MINOR_REV_BIT2","","MAIN_PLD_MINOR_REV_BIT3","","MAIN_PLD_MAJOR_REV_BIT0","","MAIN_PLD_MAJOR_REV_BIT1","","MAIN_PLD_MAJOR_REV_BIT2","","MAIN_PLD_MAJOR_REV_BIT3","",
+ /*H0-H7*/ "","","WMEMX_PWR_FLT","","WCPUX_MEM_PWR_FLT","","PWRGD_P3V3_FF","","WPSU_PWR_FLT","","","","","","WPCH_PWR_FLT","",
+ /*I0-I7*/ "FM_CPU0_PKGID0","LED_CPU1_CH7_DIMM1_FAULT","FM_CPU0_PKGID1","LED_CPU1_CH7_DIMM2_FAULT","FM_CPU0_PKGID2","LED_CPU1_CH8_DIMM1_FAULT","H_CPU0_MEMTRIP_LVC1_N","LED_CPU1_CH8_DIMM2_FAULT","FM_CPU1_PKGID0","LED_CPU2_CH7_DIMM1_FAULT","FM_CPU1_PKGID1","LED_CPU2_CH7_DIMM2_FAULT","FM_CPU1_PKGID2","LED_CPU2_CH8_DIMM1_FAULT","H_CPU1_MEMTRIP_LVC1_N","LED_CPU2_CH8_DIMM2_FAULT",
+ /*J0-J7*/ "","","","","","","","","","","","","","","","";
+};
+
+&kcs3 {
+ aspeed,lpc-io-reg = <0xCA2>;
+ status = "okay";
+};
+
+&kcs4 {
+ aspeed,lpc-io-reg = <0xCA4>;
+ status = "okay";
+};
+
+&sio_regs {
+ status = "okay";
+ sio_status {
+ offset = <0x10C>;
+ bit-mask = <0x1F>;
+ bit-shift = <4>;
+ };
+};
+
+&lpc_sio {
+ status = "okay";
+};
+
+&lpc_snoop {
+ snoop-ports = <0x80>;
+ status = "okay";
+};
+
+&mbox {
+ status = "okay";
+};
+
+&mdio1 {
+ status = "okay";
+
+ ethphy1: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+};
+
+&mac1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii2_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC2CLK>,
+ <&syscon ASPEED_CLK_MAC2RCLK>;
+ clock-names = "MACCLK", "RCLK";
+ phy-mode = "rgmii";
+ phy-handle = <&ethphy1>;
+};
+
+&mdio2 {
+ status = "okay";
+
+ ethphy2: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+};
+
+&adc0 {
+ aspeed,int-vref-microvolt = <2500000>;
+ aspeed,battery-sensing;
+ status = "okay";
+};
+
+&adc1 {
+ aspeed,int-vref-microvolt = <2500000>;
+ aspeed,battery-sensing;
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default
+ &pinctrl_nrts1_default
+ &pinctrl_ndtr1_default
+ &pinctrl_ndsr1_default
+ &pinctrl_ncts1_default
+ &pinctrl_ndcd1_default
+ &pinctrl_nri1_default>;
+};
+
+&uart2 {
+ status = "okay";
+ pinctrl-0 = <&pinctrl_txd2_default
+ &pinctrl_rxd2_default
+ &pinctrl_nrts2_default
+ &pinctrl_ndtr2_default
+ &pinctrl_ndsr2_default
+ &pinctrl_ncts2_default
+ &pinctrl_ndcd2_default
+ &pinctrl_nri2_default>;
+};
+
+&uart3 {
+ status = "okay";
+ pinctrl-0 = <>;
+};
+
+&uart4 {
+ status = "okay";
+ pinctrl-0 = <>;
+};
+
+&uart5 {
+ status = "okay";
+ // Workaround for A0
+ compatible = "snps,dw-apb-uart";
+};
+
+&uart_routing {
+ status = "okay";
+};
+
+&emmc_controller{
+ status = "okay";
+};
+
+&emmc {
+ non-removable;
+ bus-width = <4>;
+ max-frequency = <52000000>;
+};
+
+&i2c0 {
+ /* SMB_CHASSENSOR_STBY_LVC3 */
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c4 {
+ /* SMB_HSBP_STBY_LVC3_R */
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c5 {
+ /* SMB_SMLINK0_STBY_LVC3_R2 */
+ bus-frequency = <1000000>;
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c6 {
+ /* SMB_TEMPSENSOR_STBY_LVC3_R */
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c7 {
+ /* SMB_PMBUS_SML1_STBY_LVC3_R */
+ multi-master;
+ #retries = <3>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c8 {
+ /* SMB_PCIE_STBY_LVC3_R */
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c9 {
+ /* SMB_HOST_STBY_BMC_LVC3_R */
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+
+ rtc-pch@44 {
+ compatible = "rtc,pchc620";
+ reg = <0x44>;
+ };
+};
+
+&i2c12 {
+ /* SMB_CPU_PIROM */
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i2c13 {
+ /* SMB_IPMB_STBY_LVC3_R */
+ multi-master;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
+&i3c0 {
+ /* I3C_SPD_DDRABCD_CPU0_BMC */
+ status = "okay";
+ jdec-spd;
+
+ /* Renesas SPD5118 */
+ spd5118_0_0: spd@50,3C000000000 {
+ reg = <0x50 0x3C0 0x00000000>;
+ assigned-address = <0x50>;
+ };
+
+ nvdimm_0_0: nvm@58,3C0000000008 {
+ reg = <0x58 0x3C0 0x00000008>;
+ assigned-address = <0x58>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_0_1: spd@51,3C000000001 {
+ reg = <0x51 0x3C0 0x00000001>;
+ assigned-address = <0x51>;
+ };
+
+ nvdimm_0_1: nvm@59,3C0000000009 {
+ reg = <0x59 0x3C0 0x00000009>;
+ assigned-address = <0x59>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_0_2: spd@52,3C000000002 {
+ reg = <0x52 0x3C0 0x00000002>;
+ assigned-address = <0x52>;
+ };
+
+ nvdimm_0_2: nvm@5A,3C000000000A {
+ reg = <0x5A 0x3C0 0x0000000A>;
+ assigned-address = <0x5A>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_0_3: spd@53,3C000000003 {
+ reg = <0x53 0x3C0 0x00000003>;
+ assigned-address = <0x53>;
+ };
+
+ nvdimm_0_3: nvm@5B,3C000000000B {
+ reg = <0x5B 0x3C0 0x0000000B>;
+ assigned-address = <0x5B>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_0_4: spd@54,3C000000004 {
+ reg = <0x54 0x3C0 0x00000004>;
+ assigned-address = <0x54>;
+ };
+
+ nvdimm_0_4: nvm@5C,3C000000000C {
+ reg = <0x5C 0x3C0 0x0000000C>;
+ assigned-address = <0x5C>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_0_5: spd@55,3C000000005 {
+ reg = <0x55 0x3C0 0x00000005>;
+ assigned-address = <0x55>;
+ };
+
+ nvdimm_0_5: nvm@5D,3C000000000D {
+ reg = <0x5D 0x3C0 0x0000000D>;
+ assigned-address = <0x5D>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_0_6: spd@56,3C000000006 {
+ reg = <0x56 0x3C0 0x00000006>;
+ assigned-address = <0x56>;
+ };
+
+ nvdimm_0_6: nvm@5E,3C000000000E {
+ reg = <0x5E 0x3C0 0x0000000E>;
+ assigned-address = <0x5E>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_0_7: spd@57,3C000000007 {
+ reg = <0x57 0x3C0 0x00000007>;
+ assigned-address = <0x57>;
+ };
+
+ nvdimm_0_7: nvm@5F,3C000000000F {
+ reg = <0x5F 0x3C0 0x0000000F>;
+ assigned-address = <0x5F>;
+ };
+};
+
+&i3c1 {
+ /* I3C_SPD_DDREFGH_CPU0_BMC */
+ status = "okay";
+ jdec-spd;
+
+ /* Renesas SPD5118 */
+ spd5118_1_0: spd@50,3C000000000 {
+ reg = <0x50 0x3C0 0x00000000>;
+ assigned-address = <0x50>;
+ };
+
+ nvdimm_1_0: nvm@58,3C0000000008 {
+ reg = <0x58 0x3C0 0x00000008>;
+ assigned-address = <0x58>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_1_1: spd@51,3C000000001 {
+ reg = <0x51 0x3C0 0x00000001>;
+ assigned-address = <0x51>;
+ };
+
+ nvdimm_1_1: nvm@59,3C0000000009 {
+ reg = <0x59 0x3C0 0x00000009>;
+ assigned-address = <0x59>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_1_2: spd@52,3C000000002 {
+ reg = <0x52 0x3C0 0x00000002>;
+ assigned-address = <0x52>;
+ };
+
+ nvdimm_1_2: nvm@5A,3C000000000A {
+ reg = <0x5A 0x3C0 0x0000000A>;
+ assigned-address = <0x5A>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_1_3: spd@53,3C000000003 {
+ reg = <0x53 0x3C0 0x00000003>;
+ assigned-address = <0x53>;
+ };
+
+ nvdimm_1_3: nvm@5B,3C000000000B {
+ reg = <0x5B 0x3C0 0x0000000B>;
+ assigned-address = <0x5B>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_1_4: spd@54,3C000000004 {
+ reg = <0x54 0x3C0 0x00000004>;
+ assigned-address = <0x54>;
+ };
+
+ nvdimm_1_4: nvm@5C,3C000000000C {
+ reg = <0x5C 0x3C0 0x0000000C>;
+ assigned-address = <0x5C>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_1_5: spd@55,3C000000005 {
+ reg = <0x55 0x3C0 0x00000005>;
+ assigned-address = <0x55>;
+ };
+
+ nvdimm_1_5: nvm@5D,3C000000000D {
+ reg = <0x5D 0x3C0 0x0000000D>;
+ assigned-address = <0x5D>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_1_6: spd@56,3C000000006 {
+ reg = <0x56 0x3C0 0x00000006>;
+ assigned-address = <0x56>;
+ };
+
+ nvdimm_1_6: nvm@5E,3C000000000E {
+ reg = <0x5E 0x3C0 0x0000000E>;
+ assigned-address = <0x5E>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_1_7: spd@57,3C000000007 {
+ reg = <0x57 0x3C0 0x00000007>;
+ assigned-address = <0x57>;
+ };
+
+ nvdimm_1_7: nvm@5F,3C000000000F {
+ reg = <0x5F 0x3C0 0x0000000F>;
+ assigned-address = <0x5F>;
+ };
+};
+
+&i3c2 {
+ /* I3C_SPD_DDRABCD_CPU1_BMC */
+ status = "okay";
+ jdec-spd;
+
+ /* Renesas SPD5118 */
+ spd5118_2_0: spd@50,3C000000000 {
+ reg = <0x50 0x3C0 0x00000000>;
+ assigned-address = <0x50>;
+ };
+
+ nvdimm_2_0: nvm@58,3C0000000008 {
+ reg = <0x58 0x3C0 0x00000008>;
+ assigned-address = <0x58>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_2_1: spd@51,3C000000001 {
+ reg = <0x51 0x3C0 0x00000001>;
+ assigned-address = <0x51>;
+ };
+
+ nvdimm_2_1: nvm@59,3C0000000009 {
+ reg = <0x59 0x3C0 0x00000009>;
+ assigned-address = <0x59>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_2_2: spd@52,3C000000002 {
+ reg = <0x52 0x3C0 0x00000002>;
+ assigned-address = <0x52>;
+ };
+
+ nvdimm_2_2: nvm@5A,3C000000000A {
+ reg = <0x5A 0x3C0 0x0000000A>;
+ assigned-address = <0x5A>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_2_3: spd@53,3C000000003 {
+ reg = <0x53 0x3C0 0x00000003>;
+ assigned-address = <0x53>;
+ };
+
+ nvdimm_2_3: nvm@5B,3C000000000B {
+ reg = <0x5B 0x3C0 0x0000000B>;
+ assigned-address = <0x5B>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_2_4: spd@54,3C000000004 {
+ reg = <0x54 0x3C0 0x00000004>;
+ assigned-address = <0x54>;
+ };
+
+ nvdimm_2_4: nvm@5C,3C000000000C {
+ reg = <0x5C 0x3C0 0x0000000C>;
+ assigned-address = <0x5C>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_2_5: spd@55,3C000000005 {
+ reg = <0x55 0x3C0 0x00000005>;
+ assigned-address = <0x55>;
+ };
+
+ nvdimm_2_5: nvm@5D,3C000000000D {
+ reg = <0x5D 0x3C0 0x0000000D>;
+ assigned-address = <0x5D>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_2_6: spd@56,3C000000006 {
+ reg = <0x56 0x3C0 0x00000006>;
+ assigned-address = <0x56>;
+ };
+
+ nvdimm_2_6: nvm@5E,3C000000000E {
+ reg = <0x5E 0x3C0 0x0000000E>;
+ assigned-address = <0x5E>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_2_7: spd@57,3C000000007 {
+ reg = <0x57 0x3C0 0x00000007>;
+ assigned-address = <0x57>;
+ };
+
+ nvdimm_2_7: nvm@5F,3C000000000F {
+ reg = <0x5F 0x3C0 0x0000000F>;
+ assigned-address = <0x5F>;
+ };
+};
+
+&i3c3 {
+ /* I3C_SPD_DDREFGH_CPU1_BMC */
+ status = "okay";
+ jdec-spd;
+
+ /* Renesas SPD5118 */
+ spd5118_3_0: spd@50,3C000000000 {
+ reg = <0x50 0x3C0 0x00000000>;
+ assigned-address = <0x50>;
+ };
+
+ nvdimm_3_0: nvm@58,3C0000000008 {
+ reg = <0x58 0x3C0 0x00000008>;
+ assigned-address = <0x58>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_3_1: spd@51,3C000000001 {
+ reg = <0x51 0x3C0 0x00000001>;
+ assigned-address = <0x51>;
+ };
+
+ nvdimm_3_1: nvm@59,3C0000000009 {
+ reg = <0x59 0x3C0 0x00000009>;
+ assigned-address = <0x59>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_3_2: spd@52,3C000000002 {
+ reg = <0x52 0x3C0 0x00000002>;
+ assigned-address = <0x52>;
+ };
+
+ nvdimm_3_2: nvm@5A,3C000000000A {
+ reg = <0x5A 0x3C0 0x0000000A>;
+ assigned-address = <0x5A>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_3_3: spd@53,3C000000003 {
+ reg = <0x53 0x3C0 0x00000003>;
+ assigned-address = <0x53>;
+ };
+
+ nvdimm_3_3: nvm@5B,3C000000000B {
+ reg = <0x5B 0x3C0 0x0000000B>;
+ assigned-address = <0x5B>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_3_4: spd@54,3C000000004 {
+ reg = <0x54 0x3C0 0x00000004>;
+ assigned-address = <0x54>;
+ };
+
+ nvdimm_3_4: nvm@5C,3C000000000C {
+ reg = <0x5C 0x3C0 0x0000000C>;
+ assigned-address = <0x5C>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_3_5: spd@55,3C000000005 {
+ reg = <0x55 0x3C0 0x00000005>;
+ assigned-address = <0x55>;
+ };
+
+ nvdimm_3_5: nvm@5D,3C000000000D {
+ reg = <0x5D 0x3C0 0x0000000D>;
+ assigned-address = <0x5D>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_3_6: spd@56,3C000000006 {
+ reg = <0x56 0x3C0 0x00000006>;
+ assigned-address = <0x56>;
+ };
+
+ nvdimm_3_6: nvm@5E,3C000000000E {
+ reg = <0x5E 0x3C0 0x0000000E>;
+ assigned-address = <0x5E>;
+ };
+
+ /* Renesas SPD5118 */
+ spd5118_3_7: spd@57,3C000000007 {
+ reg = <0x57 0x3C0 0x00000007>;
+ assigned-address = <0x57>;
+ };
+
+ nvdimm_3_7: nvm@5F,3C000000000F {
+ reg = <0x5F 0x3C0 0x0000000F>;
+ assigned-address = <0x5F>;
+ };
+};
+
+&pcieh {
+ status = "okay";
+};
+
+&pwm_tacho {
+ status = "okay";
+ #pwm-cells = <3>;
+ aspeed,pwm-outputs = <7>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_tach0_default
+ &pinctrl_pwm1_default &pinctrl_tach1_default
+ &pinctrl_pwm2_default &pinctrl_tach2_default
+ &pinctrl_pwm3_default &pinctrl_tach3_default
+ &pinctrl_pwm4_default &pinctrl_tach4_default
+ &pinctrl_pwm5_default &pinctrl_tach5_default
+ &pinctrl_pwm12g1_default &pinctrl_tach6_default
+ &pinctrl_pwm13g1_default &pinctrl_tach7_default
+ &pinctrl_pwm14g1_default &pinctrl_tach8_default
+ &pinctrl_pwm15g1_default &pinctrl_tach9_default
+ &pinctrl_pwm7_default>;
+
+ fan@0 {
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x01>;
+ };
+ fan@2 {
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02>;
+ };
+ fan@3 {
+ reg = <0x03>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x03>;
+ };
+ fan@4 {
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x04>;
+ };
+ fan@5 {
+ reg = <0x05>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x05>;
+ };
+ fan@6 {
+ reg = <0x0c>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x06>;
+ };
+ fan@7 {
+ reg = <0x0d>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x07>;
+ };
+ fan@8 {
+ reg = <0x0e>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x08>;
+ };
+ fan@9 {
+ reg = <0x0f>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x09>;
+ };
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&vhub {
+ status = "okay";
+};
+
+&jtag1 {
+ status = "okay";
+};
+
+&wdt2 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
index d5b7d28cda88..635571f8c0c3 100644
--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
@@ -3,14 +3,20 @@
/dts-v1/;
#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
/ {
- model = "S2600WF BMC";
+ model = "Intel S2600WF BMC";
compatible = "intel,s2600wf-bmc", "aspeed,ast2500";
+ aliases {
+ serial4 = &uart5;
+ };
+
chosen {
stdout-path = &uart5;
- bootargs = "earlycon";
+ bootargs = "console=ttyS4,115200 earlycon";
};
memory@80000000 {
@@ -26,6 +32,32 @@
no-map;
reg = <0x9f000000 0x01000000>; /* 16M */
};
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ video_engine_memory: jpegbuffer {
+ size = <0x02000000>; /* 32M */
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ ramoops@9eff0000{
+ compatible = "ramoops";
+ reg = <0x9eff0000 0x10000>;
+ record-size = <0x2000>;
+ console-size = <0x2000>;
+ };
+ };
+
+ vga-shared-memory {
+ compatible = "aspeed,ast2500-vga-sharedmem";
+ reg = <0x9ff00000 0x100000>;
};
iio-hwmon {
@@ -36,6 +68,29 @@
<&adc 12>, <&adc 13>, <&adc 14>, <&adc 15>;
};
+ leds {
+ compatible = "gpio-leds";
+
+ identify {
+ default-state = "on";
+ gpios = <&gpio ASPEED_GPIO(S, 6) GPIO_ACTIVE_LOW>;
+ };
+
+ status_amber {
+ default-state = "off";
+ gpios = <&gpio ASPEED_GPIO(S, 5) GPIO_ACTIVE_LOW>;
+ };
+
+ status_green {
+ default-state = "keep";
+ gpios = <&gpio ASPEED_GPIO(S, 4) GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ beeper {
+ compatible = "pwm-beeper";
+ pwms = <&timer 5 1000000 0>;
+ };
};
&fmc {
@@ -44,35 +99,204 @@
status = "okay";
m25p,fast-read;
label = "bmc";
-#include "openbmc-flash-layout.dtsi"
+#include "openbmc-flash-layout-intel-64MB.dtsi"
};
};
-&spi1 {
+&espi {
status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_spi1_default>;
+};
- flash@0 {
- status = "okay";
- m25p,fast-read;
- label = "pnor";
+&jtag {
+ status = "okay";
+};
+
+&peci0 {
+ status = "okay";
+ gpios = <&gpio ASPEED_GPIO(F, 6) 0>;
+};
+
+&syscon {
+ uart-clock-high-speed;
+ status = "okay";
+
+ misc_control {
+ compatible = "aspeed,bmc-misc";
+ uart_port_debug {
+ offset = <0x2c>;
+ bit-mask = <0x1>;
+ bit-shift = <10>;
+ read-only;
+ };
+ p2a-bridge {
+ offset = <0x180>;
+ bit-mask = <0x1>;
+ bit-shift = <1>;
+ read-only;
+ };
+ boot-2nd-flash {
+ offset = <0x70>;
+ bit-mask = <0x1>;
+ bit-shift = <17>;
+ read-only;
+ };
+ chip_id {
+ offset = <0x150>;
+ bit-mask = <0x0fffffff 0xffffffff>;
+ bit-shift = <0>;
+ read-only;
+ reg-width = <64>;
+ hash-data = "d44f9b804976fa23c2e25d62f16154d26520a7e24c5555095fd1b55c027804f1570dcd16189739c640cd7d9a6ce14944a2c4eaf1dc429eed6940e8a83498a474";
+ };
};
};
-&uart5 {
+&adc {
status = "okay";
};
-&mac0 {
+&gpio {
+ status = "okay";
+ /* Enable GPIOE0 and GPIOE2 pass-through by default */
+ pinctrl-names = "pass-through";
+ pinctrl-0 = <&pinctrl_gpie0_default
+ &pinctrl_gpie2_default>;
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "RESET_BUTTON","RESET_OUT","POWER_BUTTON","POWER_OUT","","","","",
+ /*F0-F7*/ "NMI_OUT","","","","CPU_ERR0","CPU_ERR1","","",
+ /*G0-G7*/ "CPU_ERR2","CPU_CATERR","PCH_BMC_THERMTRIP","LCP_ENTER_BUTTON","LCP_LEFT_BUTTON","FM_BMC_BOARD_SKU_ID5_N","","",
+ /*H0-H7*/ "","","","FM_NODE_ID_1","FM_NODE_ID_2","FM_NODE_ID_3","FM_NODE_ID_4","FM_240VA_STATUS",
+ /*I0-I7*/ "FM_SYS_FAN0_PRSNT_D_N","FM_SYS_FAN1_PRSNT_D_N","FM_SYS_FAN2_PRSNT_D_N","FM_SYS_FAN3_PRSNT_D_N","FM_SYS_FAN4_PRSNT_D_N","FM_SYS_FAN5_PRSNT_D_N","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","PWR_DEBUG_N",
+ /*R0-R7*/ "","XDP_PRST_N","","","","","","CHASSIS_INTRUSION",
+ /*S0-S7*/ "REMOTE_DEBUG_ENABLE","SYSPWROK","RSMRST_N","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "SIO_S3","SIO_S5","","SIO_ONCONTROL","","","","",
+ /*Z0-Z7*/ "","SIO_POWER_GOOD","","","","","","",
+ /*AA0-AA7*/ "P3VBAT_BRIDGE_EN","","","","PREQ_N","TCK_MUX_SEL","SMI","POST_COMPLETE",
+ /*AB0-AB7*/ "","NMI_BUTTON","ID_BUTTON","PS_PWROK","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+};
+
+&sgpio {
status = "okay";
+ gpio-line-names =
+ /* SGPIO output lines */
+ /*OA0-OA7*/ "","","","","","","","",
+ /*OB0-OB7*/ "LED_CPU1_CH1_DIMM1_FAULT","LED_CPU1_CH1_DIMM2_FAULT","LED_CPU1_CH2_DIMM1_FAULT","LED_CPU1_CH2_DIMM2_FAULT","LED_CPU1_CH3_DIMM1_FAULT","LED_CPU1_CH3_DIMM2_FAULT","LED_CPU1_CH4_DIMM1_FAULT","LED_CPU1_CH4_DIMM2_FAULT",
+ /*OC0-OC7*/ "LED_CPU1_CH5_DIMM1_FAULT","LED_CPU1_CH5_DIMM2_FAULT","LED_CPU1_CH6_DIMM1_FAULT","LED_CPU1_CH6_DIMM2_FAULT","LED_FAN1_FAULT","LED_FAN2_FAULT","LED_FAN3_FAULT","LED_FAN4_FAULT",
+ /*OD0-OD7*/ "LED_FAN5_FAULT","LED_FAN6_FAULT","LED_FAN7_FAULT","LED_FAN8_FAULT","LED_CPU2_CH1_DIMM1_FAULT","LED_CPU2_CH1_DIMM2_FAULT","LED_CPU2_CH2_DIMM1_FAULT","LED_CPU2_CH2_DIMM2_FAULT",
+ /*OE0-OE7*/ "LED_CPU2_CH3_DIMM1_FAULT","LED_CPU2_CH3_DIMM2_FAULT","LED_CPU2_CH4_DIMM1_FAULT","LED_CPU2_CH4_DIMM2_FAULT","LED_CPU2_CH5_DIMM1_FAULT","LED_CPU2_CH5_DIMM2_FAULT","LED_CPU2_CH6_DIMM1_FAULT","LED_CPU2_CH6_DIMM2_FAULT",
+ /*OF0-OF7*/ "LED_CPU3_CH1_DIMM1_FAULT","LED_CPU3_CH1_DIMM2_FAULT","LED_CPU3_CH2_DIMM1_FAULT","LED_CPU3_CH2_DIMM2_FAULT","LED_CPU3_CH3_DIMM1_FAULT","LED_CPU3_CH3_DIMM2_FAULT","LED_CPU3_CH4_DIMM1_FAULT","LED_CPU3_CH4_DIMM2_FAULT",
+ /*OG0-OG7*/ "LED_CPU3_CH5_DIMM1_FAULT","LED_CPU3_CH5_DIMM2_FAULT","LED_CPU3_CH6_DIMM1_FAULT","LED_CPU3_CH6_DIMM2_FAULT","LED_CPU4_CH1_DIMM1_FAULT","LED_CPU4_CH1_DIMM2_FAULT","LED_CPU4_CH2_DIMM1_FAULT","LED_CPU4_CH2_DIMM2_FAULT",
+ /*OH0-OH7*/ "LED_CPU4_CH3_DIMM1_FAULT","LED_CPU4_CH3_DIMM2_FAULT","LED_CPU4_CH4_DIMM1_FAULT","LED_CPU4_CH4_DIMM2_FAULT","LED_CPU4_CH5_DIMM1_FAULT","LED_CPU4_CH5_DIMM2_FAULT","LED_CPU4_CH6_DIMM1_FAULT","LED_CPU4_CH6_DIMM2_FAULT",
+ /*OI0-OI7*/ "","","","","","","","",
+ /*OJ0-OJ7*/ "","","","","","","","",
+ /*DUMMY*/ "","","","","","","","",
+ /*DUMMY*/ "","","","","","","","",
+ /* SGPIO input lines */
+ /*IA0-IA7*/ "CPU1_PRESENCE","CPU1_THERMTRIP","CPU1_VRHOT","CPU1_FIVR_FAULT","CPU1_MEM_ABCD_VRHOT","CPU1_MEM_EFGH_VRHOT","","",
+ /*IB0-IB7*/ "CPU1_MISMATCH","CPU1_MEM_THERM_EVENT","CPU2_PRESENCE","CPU2_THERMTRIP","CPU2_VRHOT","CPU2_FIVR_FAULT","CPU2_MEM_ABCD_VRHOT","CPU2_MEM_EFGH_VRHOT",
+ /*IC0-IC7*/ "","","CPU2_MISMATCH","CPU2_MEM_THERM_EVENT","","","","",
+ /*ID0-ID7*/ "","","","","","","","",
+ /*IE0-IE7*/ "","","","","","","","",
+ /*IF0-IF7*/ "SGPIO_PLD_MINOR_REV_BIT0","SGPIO_PLD_MINOR_REV_BIT1","SGPIO_PLD_MINOR_REV_BIT2","SGPIO_PLD_MINOR_REV_BIT3","SGPIO_PLD_MAJOR_REV_BIT0","SGPIO_PLD_MAJOR_REV_BIT1","SGPIO_PLD_MAJOR_REV_BIT2","SGPIO_PLD_MAJOR_REV_BIT3",
+ /*IG0-IG7*/ "MAIN_PLD_MINOR_REV_BIT0","MAIN_PLD_MINOR_REV_BIT1","MAIN_PLD_MINOR_REV_BIT2","MAIN_PLD_MINOR_REV_BIT3","MAIN_PLD_MAJOR_REV_BIT0","MAIN_PLD_MAJOR_REV_BIT1","MAIN_PLD_MAJOR_REV_BIT2","MAIN_PLD_MAJOR_REV_BIT3",
+ /*IH0-IH7*/ "","","","","","","","",
+ /*II0-II7*/ "","","","","","","","",
+ /*IJ0-IJ7*/ "","","","","","","","";
+};
+
+&kcs3 {
+ kcs_addr = <0xCA2>;
+ status = "okay";
+};
+
+&kcs4 {
+ kcs_addr = <0xCA4>;
+ status = "okay";
+};
+
+&sio_regs {
+ status = "okay";
+ sio_status {
+ offset = <0x10C>;
+ bit-mask = <0x1F>;
+ bit-shift = <4>;
+ };
+};
+
+&lpc_sio {
+ status = "okay";
+};
+
+&lpc_snoop {
+ snoop-ports = <0x80>;
+ status = "okay";
+};
+
+&mbox {
+ status = "okay";
+};
+
+&uart_routing {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_rmii1_default>;
- clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>,
- <&syscon ASPEED_CLK_MAC1RCLK>;
- clock-names = "MACCLK", "RCLK";
- use-ncsi;
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default
+ &pinctrl_nrts1_default
+ &pinctrl_ndtr1_default
+ &pinctrl_ndsr1_default
+ &pinctrl_ncts1_default
+ &pinctrl_ndcd1_default
+ &pinctrl_nri1_default>;
+};
+
+&uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd2_default
+ &pinctrl_rxd2_default
+ &pinctrl_nrts2_default
+ &pinctrl_ndtr2_default
+ &pinctrl_ndsr2_default
+ &pinctrl_ncts2_default
+ &pinctrl_ndcd2_default
+ &pinctrl_nri2_default>;
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+};
+
+&uart5 {
+ status = "okay";
};
&mac1 {
@@ -82,44 +306,87 @@
pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>;
};
+&mac0 {
+ status = "okay";
+ use-ncsi;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii1_default>;
+};
+
+&i2c0 {
+ multi-master;
+ general-call;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
+ status = "okay";
+};
+
&i2c1 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&i2c2 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&i2c3 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&i2c4 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&i2c5 {
+ bus-frequency = <1000000>;
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&i2c6 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&i2c7 {
+ multi-master;
+ #retries = <3>;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&i2c13 {
+ multi-master;
+ aspeed,dma-buf-size = <4095>;
+ aspeed,hw-timeout-ms = <300>;
status = "okay";
};
&gfx {
status = "okay";
+ memory-region = <&gfx_memory>;
};
-&pinctrl {
- aspeed,external-nodes = <&gfx &lhc>;
+&vuart {
+ status = "okay";
};
&pwm_tacho {
@@ -129,4 +396,61 @@
&pinctrl_pwm2_default &pinctrl_pwm3_default
&pinctrl_pwm4_default &pinctrl_pwm5_default
&pinctrl_pwm6_default &pinctrl_pwm7_default>;
+
+ fan@0 {
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00 0x01>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x02 0x03>;
+ };
+ fan@2 {
+ reg = <0x02>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x04 0x05>;
+ };
+ fan@3 {
+ reg = <0x03>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x06 0x07>;
+ };
+ fan@4 {
+ reg = <0x04>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x08 0x09>;
+ };
+ fan@5 {
+ reg = <0x05>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0A 0x0B>;
+ };
+ fan@6 {
+ reg = <0x06>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0C 0x0D>;
+ };
+ fan@7 {
+ reg = <0x07>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x0E 0x0F>;
+ };
+
+};
+
+&timer {
+/*
+ * Available settings:
+ * fttmr010,pwm-outputs = <5>, <6>, <7>, <8>;
+ * pinctrl-0 = <&pinctrl_timer5_default &pinctrl_timer6_default
+ * &pinctrl_timer7_default &pinctrl_timer8_default>;
+ */
+ fttmr010,pwm-outputs = <6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_timer6_default>;
+ #pwm-cells = <3>;
+ status = "okay";
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+&vhub {
+ status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index f14dace34c5a..bf488f61e475 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -120,14 +120,6 @@
reg = <0x1e6c2000 0x80>;
};
- mac0: ethernet@1e660000 {
- compatible = "aspeed,ast2400-mac", "faraday,ftgmac100";
- reg = <0x1e660000 0x180>;
- interrupts = <2>;
- clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>;
- status = "disabled";
- };
-
mac1: ethernet@1e680000 {
compatible = "aspeed,ast2400-mac", "faraday,ftgmac100";
reg = <0x1e680000 0x180>;
@@ -136,6 +128,14 @@
status = "disabled";
};
+ mac0: ethernet@1e660000 {
+ compatible = "aspeed,ast2400-mac", "faraday,ftgmac100";
+ reg = <0x1e660000 0x180>;
+ interrupts = <2>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>;
+ status = "disabled";
+ };
+
ehci0: usb@1e6a1000 {
compatible = "aspeed,ast2400-ehci", "generic-ehci";
reg = <0x1e6a1000 0x100>;
@@ -390,6 +390,33 @@
reg = <0x9c 0x4>;
status = "disabled";
};
+
+ sio_regs: regs {
+ compatible = "aspeed,bmc-misc";
+ };
+
+ lpc_sio: lpc-sio@180 {
+ compatible = "aspeed,ast2400-lpc-sio";
+ reg = <0x180 0x20>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+ mbox: mbox@200 {
+ compatible = "aspeed,ast2400-mbox";
+ reg = <0x200 0x5c>;
+ interrupts = <46>;
+ #mbox-cells = <1>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+ };
+
+ peci: bus@1e78b000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e78b000 0x60>;
};
uart2: serial@1e78d000 {
@@ -435,13 +462,40 @@
};
};
+&peci {
+ peci0: peci-bus@0 {
+ compatible = "aspeed,ast2400-peci";
+ reg = <0x0 0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <15>;
+ clocks = <&syscon ASPEED_CLK_GATE_REFCLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+ msg-timing = <1>;
+ addr-timing = <1>;
+ rd-sampling-point = <8>;
+ cmd-timeout-ms = <1000>;
+ status = "disabled";
+ };
+};
+
&i2c {
- i2c_ic: interrupt-controller@0 {
- #interrupt-cells = <1>;
- compatible = "aspeed,ast2400-i2c-ic";
+ i2c_gr: i2c-global-regs@0 {
+ compatible = "aspeed,ast2400-i2c-gr", "syscon";
reg = <0x0 0x40>;
- interrupts = <12>;
- interrupt-controller;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x40>;
+
+ i2c_ic: interrupt-controller@0 {
+ #interrupt-cells = <1>;
+ compatible = "aspeed,ast2400-i2c-ic";
+ reg = <0x0 0x4>;
+ interrupts = <12>;
+ interrupt-controller;
+ };
};
i2c0: i2c-bus@40 {
@@ -449,7 +503,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x40 0x40>;
+ reg = <0x40 0x40>, <0x800 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -465,7 +519,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x80 0x40>;
+ reg = <0x80 0x40>, <0x880 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -481,7 +535,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0xc0 0x40>;
+ reg = <0xc0 0x40>, <0x900 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -498,7 +552,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x100 0x40>;
+ reg = <0x100 0x40>, <0x980 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -515,7 +569,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x140 0x40>;
+ reg = <0x140 0x40>, <0xa00 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -532,7 +586,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x180 0x40>;
+ reg = <0x180 0x40>, <0xa80 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -549,7 +603,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x1c0 0x40>;
+ reg = <0x1c0 0x40>, <0xb00 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -566,7 +620,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x300 0x40>;
+ reg = <0x300 0x40>, <0xb80 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -583,7 +637,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x340 0x40>;
+ reg = <0x340 0x40>, <0xc00 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -600,7 +654,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x380 0x40>;
+ reg = <0x380 0x40>, <0xc80 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -617,7 +671,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x3c0 0x40>;
+ reg = <0x3c0 0x40>, <0xd00 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -634,7 +688,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x400 0x40>;
+ reg = <0x400 0x40>, <0xd80 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -651,7 +705,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x440 0x40>;
+ reg = <0x440 0x40>, <0xe00 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -668,7 +722,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x480 0x40>;
+ reg = <0x480 0x40>, <0xe80 0x80>;
compatible = "aspeed,ast2400-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 7495f93c5069..581e7ad9c615 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -128,7 +128,7 @@
};
vic: interrupt-controller@1e6c0080 {
- compatible = "aspeed,ast2400-vic";
+ compatible = "aspeed,ast2500-vic";
interrupt-controller;
#interrupt-cells = <1>;
valid-sources = <0xfefff7ff 0x0807ffff>;
@@ -142,14 +142,6 @@
reg = <0x1e6c2000 0x80>;
};
- mac0: ethernet@1e660000 {
- compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
- reg = <0x1e660000 0x180>;
- interrupts = <2>;
- clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>;
- status = "disabled";
- };
-
mac1: ethernet@1e680000 {
compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
reg = <0x1e680000 0x180>;
@@ -158,6 +150,14 @@
status = "disabled";
};
+ mac0: ethernet@1e660000 {
+ compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
+ reg = <0x1e660000 0x180>;
+ interrupts = <2>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>;
+ status = "disabled";
+ };
+
ehci0: usb@1e6a1000 {
compatible = "aspeed,ast2500-ehci", "generic-ehci";
reg = <0x1e6a1000 0x100>;
@@ -342,6 +342,7 @@
clocks = <&syscon ASPEED_CLK_APB>;
interrupt-controller;
#interrupt-cells = <2>;
+ status = "disabled";
};
sgpio: sgpio@1e780200 {
@@ -366,7 +367,7 @@
timer: timer@1e782000 {
/* This timer is a Faraday FTTMR010 derivative */
- compatible = "aspeed,ast2400-timer";
+ compatible = "aspeed,ast2500-timer";
reg = <0x1e782000 0x90>;
interrupts = <16 17 18 35 36 37 38 39>;
clocks = <&syscon ASPEED_CLK_APB>;
@@ -433,6 +434,25 @@
status = "disabled";
};
+ espi: espi@1e6ee000 {
+ compatible = "aspeed,ast2500-espi-slave";
+ reg = <0x1e6ee000 0x100>;
+ interrupts = <23>;
+ status = "disabled";
+ clocks = <&syscon ASPEED_CLK_GATE_ESPICLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_espi_default>;
+ };
+
+ jtag: jtag@1e6e4000 {
+ compatible = "aspeed,ast2500-jtag";
+ reg = <0x1e6e4000 0x1c>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ resets = <&syscon ASPEED_RESET_JTAG_MASTER>;
+ interrupts = <43>;
+ status = "disabled";
+ };
+
lpc: lpc@1e789000 {
compatible = "aspeed,ast2500-lpc-v2", "simple-mfd", "syscon";
reg = <0x1e789000 0x1000>;
@@ -514,6 +534,33 @@
clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
+
+ sio_regs: regs {
+ compatible = "aspeed,bmc-misc";
+ };
+
+ lpc_sio: lpc-sio@180 {
+ compatible = "aspeed,ast2500-lpc-sio";
+ reg = <0x180 0x20>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+ mbox: mbox@200 {
+ compatible = "aspeed,ast2500-mbox";
+ reg = <0x200 0x5c>;
+ interrupts = <46>;
+ #mbox-cells = <1>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+ };
+
+ peci: bus@1e78b000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e78b000 0x60>;
};
uart2: serial@1e78d000 {
@@ -559,13 +606,40 @@
};
};
+&peci {
+ peci0: peci-bus@0 {
+ compatible = "aspeed,ast2500-peci";
+ reg = <0x0 0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <15>;
+ clocks = <&syscon ASPEED_CLK_GATE_REFCLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+ msg-timing = <1>;
+ addr-timing = <1>;
+ rd-sampling-point = <8>;
+ cmd-timeout-ms = <1000>;
+ status = "disabled";
+ };
+};
+
&i2c {
- i2c_ic: interrupt-controller@0 {
- #interrupt-cells = <1>;
- compatible = "aspeed,ast2500-i2c-ic";
+ i2c_gr: i2c-global-regs@0 {
+ compatible = "aspeed,ast2500-i2c-gr", "syscon";
reg = <0x0 0x40>;
- interrupts = <12>;
- interrupt-controller;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x40>;
+
+ i2c_ic: interrupt-controller@0 {
+ #interrupt-cells = <1>;
+ compatible = "aspeed,ast2500-i2c-ic";
+ reg = <0x0 0x4>;
+ interrupts = <12>;
+ interrupt-controller;
+ };
};
i2c0: i2c-bus@40 {
@@ -573,7 +647,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x40 0x40>;
+ reg = <0x40 0x40>, <0x200 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -589,7 +663,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x80 0x40>;
+ reg = <0x80 0x40>, <0x210 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -605,7 +679,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0xc0 0x40>;
+ reg = <0xc0 0x40>, <0x220 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -622,7 +696,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x100 0x40>;
+ reg = <0x100 0x40>, <0x230 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -639,7 +713,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x140 0x40>;
+ reg = <0x140 0x40>, <0x240 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -656,7 +730,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x180 0x40>;
+ reg = <0x180 0x40>, <0x250 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -673,7 +747,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x1c0 0x40>;
+ reg = <0x1c0 0x40>, <0x260 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -690,7 +764,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x300 0x40>;
+ reg = <0x300 0x40>, <0x270 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -707,7 +781,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x340 0x40>;
+ reg = <0x340 0x40>, <0x280 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -724,7 +798,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x380 0x40>;
+ reg = <0x380 0x40>, <0x290 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -741,7 +815,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x3c0 0x40>;
+ reg = <0x3c0 0x40>, <0x2a0 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -758,7 +832,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x400 0x40>;
+ reg = <0x400 0x40>, <0x2b0 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -775,7 +849,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x440 0x40>;
+ reg = <0x440 0x40>, <0x2c0 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -792,7 +866,7 @@
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x480 0x40>;
+ reg = <0x480 0x40>, <0x2d0 0x10>;
compatible = "aspeed,ast2500-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB>;
resets = <&syscon ASPEED_RESET_I2C>;
diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
index 7cd4f075e325..289668f051eb 100644
--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
@@ -297,6 +297,16 @@
groups = "I2C9";
};
+ pinctrl_i3c1_default: i3c1_default {
+ function = "I3C1";
+ groups = "I3C1";
+ };
+
+ pinctrl_i3c2_default: i3c2_default {
+ function = "I3C2";
+ groups = "I3C2";
+ };
+
pinctrl_i3c3_default: i3c3_default {
function = "I3C3";
groups = "I3C3";
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 4540958912aa..6e7658b97e5b 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -4,6 +4,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/aspeed-scu-ic.h>
#include <dt-bindings/clock/ast2600-clock.h>
+#include <dt-bindings/gpio/aspeed-gpio.h>
/ {
model = "Aspeed BMC";
@@ -29,6 +30,12 @@
i2c13 = &i2c13;
i2c14 = &i2c14;
i2c15 = &i2c15;
+ i3c0 = &i3c0;
+ i3c1 = &i3c1;
+ i3c2 = &i3c2;
+ i3c3 = &i3c3;
+ i3c4 = &i3c4;
+ i3c5 = &i3c5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -318,6 +325,16 @@
#size-cells = <1>;
ranges;
+ pwm_tacho: pwm-tacho-controller@1e610000 {
+ compatible = "aspeed,ast2600-pwm-tacho";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1e610000 0x100>;
+ clocks = <&syscon ASPEED_CLK_AHB>;
+ resets = <&syscon ASPEED_RESET_PWM>;
+ status = "disabled";
+ };
+
syscon: syscon@1e6e2000 {
compatible = "aspeed,ast2600-scu", "syscon", "simple-mfd";
reg = <0x1e6e2000 0x1000>;
@@ -389,6 +406,41 @@
status = "disabled";
};
+ jtag0: jtag@1e6e4000 {
+ compatible = "aspeed,ast2600-jtag";
+ reg = <0x1e6e4000 0x40>;
+ clocks = <&syscon ASPEED_CLK_APB1>;
+ resets = <&syscon ASPEED_RESET_JTAG_MASTER>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ jtag1: jtag@1e6e4100 {
+ compatible = "aspeed,ast2600-jtag";
+ reg = <0x1e6e4100 0x40>;
+ clocks = <&syscon ASPEED_CLK_APB1>;
+ resets = <&syscon ASPEED_RESET_JTAG_MASTER2>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_jtagm_default>;
+ status = "disabled";
+ };
+
+ pcieh: pcieh@1e6ed000 {
+ compatible = "aspeed,ast2600-pcieh", "syscon";
+ reg = <0x1e6ed000 0x100>;
+ };
+
+ mctp: mctp@1e6e8000 {
+ compatible = "aspeed,ast2600-mctp";
+ reg = <0x1e6e8000 0x1000>;
+ interrupts-extended = <&gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <&scu_ic0 ASPEED_AST2600_SCU_IC0_PCIE_PERST_LO_TO_HI>;
+ resets = <&syscon ASPEED_RESET_DEV_MCTP>;
+ aspeed,pcieh = <&pcieh>;
+ status = "disabled";
+ };
+
adc0: adc@1e6e9000 {
compatible = "aspeed,ast2600-adc0";
reg = <0x1e6e9000 0x100>;
@@ -418,7 +470,7 @@
#gpio-cells = <2>;
gpio-controller;
compatible = "aspeed,ast2600-gpio";
- reg = <0x1e780000 0x400>;
+ reg = <0x1e780000 0x200>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinctrl 0 0 208>;
ngpios = <208>;
@@ -459,7 +511,7 @@
#gpio-cells = <2>;
gpio-controller;
compatible = "aspeed,ast2600-gpio";
- reg = <0x1e780800 0x800>;
+ reg = <0x1e780800 0x200>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinctrl 0 208 36>;
ngpios = <36>;
@@ -537,6 +589,20 @@
status = "disabled";
};
+ peci: bus@1e78b000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1e78b000 0x100>;
+ };
+
+ i3c: bus@1e7a0000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1e7a0000 0x8000>;
+ };
+
lpc: lpc@1e789000 {
compatible = "aspeed,ast2600-lpc-v2", "simple-mfd", "syscon";
reg = <0x1e789000 0x1000>;
@@ -618,6 +684,25 @@
clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
+
+ sio_regs: regs {
+ compatible = "aspeed,bmc-misc";
+ };
+
+ lpc_sio: lpc-sio@180 {
+ compatible = "aspeed,ast2500-lpc-sio";
+ reg = <0x180 0x20>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+ mbox: mbox@200 {
+ compatible = "aspeed,ast2600-mbox";
+ reg = <0x200 0xc0>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ };
};
sdc: sdc@1e740000 {
@@ -730,6 +815,19 @@
status = "disabled";
};
+ espi: espi@1e6ee000 {
+ compatible = "aspeed,ast2600-espi-slave";
+ reg = <0x1e6ee000 0x200>;
+ interrupts-extended = <&gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <&gpio0 ASPEED_GPIO(W, 7) IRQ_TYPE_EDGE_FALLING>;
+ status = "disabled";
+ clocks = <&syscon ASPEED_CLK_GATE_ESPICLK>;
+ resets = <&syscon ASPEED_RESET_ESPI>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_espi_default>,
+ <&pinctrl_espialt_default>;
+ };
+
i2c: bus@1e78a000 {
compatible = "simple-bus";
#address-cells = <1>;
@@ -762,12 +860,30 @@
#include "aspeed-g6-pinctrl.dtsi"
+&peci {
+ peci0: peci-bus@0 {
+ compatible = "aspeed,ast2600-peci";
+ reg = <0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_REF0CLK>;
+ resets = <&syscon ASPEED_RESET_PECI>;
+ clock-frequency = <24000000>;
+ msg-timing = <1>;
+ addr-timing = <1>;
+ rd-sampling-point = <8>;
+ cmd-timeout-ms = <1000>;
+ status = "disabled";
+ };
+};
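The peci0 controller above is added in a disabled state; a board dts is expected to enable it and describe the processor clients behind it. A minimal sketch, assuming the "intel,peci-client" compatible and the conventional 0x30 client address (both illustrative, not taken from this hunk):

&peci0 {
	status = "okay";

	peci-client@30 {
		compatible = "intel,peci-client";
		reg = <0x30>;
	};
};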
+
&i2c {
i2c0: i2c-bus@80 {
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x80 0x80>;
+ reg = <0x80 0x80>, <0xc00 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -782,7 +898,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x100 0x80>;
+ reg = <0x100 0x80>, <0xc20 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -797,7 +913,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x180 0x80>;
+ reg = <0x180 0x80>, <0xc40 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -812,7 +928,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x200 0x80>;
+ reg = <0x200 0x80>, <0xc60 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -827,7 +943,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x280 0x80>;
+ reg = <0x280 0x80>, <0xc80 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -842,7 +958,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x300 0x80>;
+ reg = <0x300 0x80>, <0xca0 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -857,7 +973,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x380 0x80>;
+ reg = <0x380 0x80>, <0xcc0 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -872,7 +988,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x400 0x80>;
+ reg = <0x400 0x80>, <0xce0 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -887,7 +1003,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x480 0x80>;
+ reg = <0x480 0x80>, <0xd00 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -902,7 +1018,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x500 0x80>;
+ reg = <0x500 0x80>, <0xd20 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -917,7 +1033,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x580 0x80>;
+ reg = <0x580 0x80>, <0xd40 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -932,7 +1048,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x600 0x80>;
+ reg = <0x600 0x80>, <0xd60 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -947,7 +1063,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x680 0x80>;
+ reg = <0x680 0x80>, <0xd80 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -962,7 +1078,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x700 0x80>;
+ reg = <0x700 0x80>, <0xda0 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -977,7 +1093,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x780 0x80>;
+ reg = <0x780 0x80>, <0xdc0 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -992,7 +1108,7 @@
#address-cells = <1>;
#size-cells = <0>;
#interrupt-cells = <1>;
- reg = <0x800 0x80>;
+ reg = <0x800 0x80>, <0xde0 0x20>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
resets = <&syscon ASPEED_RESET_I2C>;
@@ -1003,3 +1119,101 @@
status = "disabled";
};
};
+
+&i3c {
+ i3c0: i3c0@2000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x2000 0x1000>;
+ compatible = "snps,dw-i3c-master-1.00a";
+ clocks = <&syscon ASPEED_CLK_GATE_I3C0CLK>;
+ resets = <&syscon ASPEED_RESET_I3C0>;
+ i2c-scl-hz = <400000>;
+ i3c-scl-hz = <12500000>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i3c1_default>;
+ status = "disabled";
+ };
+
+ i3c1: i3c1@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x3000 0x1000>;
+ compatible = "snps,dw-i3c-master-1.00a";
+ clocks = <&syscon ASPEED_CLK_GATE_I3C1CLK>;
+ resets = <&syscon ASPEED_RESET_I3C1>;
+ i2c-scl-hz = <400000>;
+ i3c-scl-hz = <12500000>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i3c2_default>;
+ status = "disabled";
+ };
+
+ i3c2: i3c2@4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x4000 0x1000>;
+ compatible = "snps,dw-i3c-master-1.00a";
+ clocks = <&syscon ASPEED_CLK_GATE_I3C2CLK>;
+ resets = <&syscon ASPEED_RESET_I3C2>;
+ i2c-scl-hz = <400000>;
+ i3c-scl-hz = <12500000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i3c3_default>;
+ status = "disabled";
+ };
+
+ i3c3: i3c3@5000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x5000 0x1000>;
+ compatible = "snps,dw-i3c-master-1.00a";
+ clocks = <&syscon ASPEED_CLK_GATE_I3C3CLK>;
+ resets = <&syscon ASPEED_RESET_I3C3>;
+ i2c-scl-hz = <400000>;
+ i3c-scl-hz = <12500000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i3c4_default>;
+ status = "disabled";
+ };
+
+ i3c4: i3c4@6000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x6000 0x1000>;
+ compatible = "snps,dw-i3c-master-1.00a";
+ clocks = <&syscon ASPEED_CLK_GATE_I3C4CLK>;
+ resets = <&syscon ASPEED_RESET_I3C4>;
+ i2c-scl-hz = <400000>;
+ i3c-scl-hz = <12500000>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i3c5_default>;
+ status = "disabled";
+ };
+
+ i3c5: i3c5@7000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x7000 0x1000>;
+ compatible = "snps,dw-i3c-master-1.00a";
+ clocks = <&syscon ASPEED_CLK_GATE_I3C5CLK>;
+ resets = <&syscon ASPEED_RESET_I3C5>;
+ i2c-scl-hz = <400000>;
+ i3c-scl-hz = <12500000>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i3c6_default>;
+ status = "disabled";
+ };
+};
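The I3C controllers are likewise left disabled here; a board dts switches on the ones that are actually wired. A minimal sketch, overriding nothing but the status (the bus frequencies set above remain in effect):

&i3c0 {
	status = "okay";
};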
diff --git a/arch/arm/boot/dts/openbmc-flash-layout-intel-128MB.dtsi b/arch/arm/boot/dts/openbmc-flash-layout-intel-128MB.dtsi
new file mode 100644
index 000000000000..ca148cf9a364
--- /dev/null
+++ b/arch/arm/boot/dts/openbmc-flash-layout-intel-128MB.dtsi
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0+
+// 128MB flash layout: PFR (active + tmp1/tmp2 + extra)
+// image with common RW partition
+
+partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u-boot@0 {
+ reg = <0x0 0x80000>;
+ label = "u-boot";
+ };
+
+ pfm@80000 {
+ reg = <0x80000 0x20000>;
+ label = "pfm";
+ };
+
+ u-boot-env@a0000 {
+ reg = <0xa0000 0x20000>;
+ label = "u-boot-env";
+ };
+
+ sofs@c0000 {
+ reg = <0xc0000 0x200000>;
+ label = "sofs";
+ };
+
+ rwfs@2c0000 {
+ reg = <0x2c0000 0x840000>;
+ label = "rwfs";
+ };
+
+ fit-image-a@b00000 {
+ reg = <0xb00000 0x1f00000>;
+ label = "image-a";
+ };
+
+ rc-image@2a00000 {
+ reg = <0x2a00000 0x2000000>;
+ label = "rc-image";
+ };
+
+ image-staging@4a00000 {
+ reg = <0x4a00000 0x3400000>;
+ label = "image-stg";
+ };
+
+ afm-active@7e00000 {
+ reg = <0x7e00000 0x20000>;
+ label = "afm-active";
+ };
+
+ afm-recovery@7e20000 {
+ reg = <0x7e20000 0x20000>;
+ label = "afm-rcvr";
+ };
+
+ reserved@7e40000 {
+ reg = <0x7e40000 0xc0000>;
+ label = "rsvrd";
+ };
+
+ cpld-gold@7f00000 {
+ reg = <0x7f00000 0x100000>;
+ label = "cpld-gold";
+ };
+};
diff --git a/arch/arm/boot/dts/openbmc-flash-layout-intel-64MB.dtsi b/arch/arm/boot/dts/openbmc-flash-layout-intel-64MB.dtsi
new file mode 100644
index 000000000000..092708f5021f
--- /dev/null
+++ b/arch/arm/boot/dts/openbmc-flash-layout-intel-64MB.dtsi
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0+
+// 64MB flash layout: redundant image with common RW partition
+
+partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ u-boot@0 {
+ reg = <0x0 0x80000>;
+ label = "u-boot";
+ };
+
+ fit-image-a@80000 {
+ reg = <0x80000 0x1b80000>;
+ label = "image-a";
+ };
+
+ sofs@1c00000 {
+ reg = <0x1c00000 0x200000>;
+ label = "sofs";
+ };
+
+ rwfs@1e00000 {
+ reg = <0x1e00000 0x600000>;
+ label = "rwfs";
+ };
+
+ u-boot-env@2400000 {
+ reg = <0x2400000 0x20000>;
+ label = "u-boot-env";
+ };
+
+ fit-image-b@2480000 {
+ reg = <0x2480000 0x1b80000>;
+ label = "image-b";
+ };
+};
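Both Intel flash layout files are intended to be included from a board dts inside the firmware SPI-NOR flash node, in the same way the generic openbmc-flash-layout.dtsi is used. A sketch under typical assumptions (the &fmc label and the SPI properties are illustrative, not defined by these files):

&fmc {
	status = "okay";

	flash@0 {
		status = "okay";
		m25p,fast-read;
		label = "bmc";
		spi-max-frequency = <50000000>;
#include "openbmc-flash-layout-intel-64MB.dtsi"
	};
};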
diff --git a/arch/arm/configs/intel_bmc_defconfig b/arch/arm/configs/intel_bmc_defconfig
new file mode 100644
index 000000000000..3e9f2915e6f3
--- /dev/null
+++ b/arch/arm/configs/intel_bmc_defconfig
@@ -0,0 +1,314 @@
+CONFIG_KERNEL_XZ=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PSI=y
+CONFIG_PSI_DEFAULT_DISABLED=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=21
+CONFIG_CGROUPS=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_UID16 is not set
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_ARCH_ASPEED=y
+CONFIG_MACH_ASPEED_G6=y
+# CONFIG_CACHE_L2X0 is not set
+CONFIG_SMP=y
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_VMSPLIT_2G=y
+CONFIG_NR_CPUS=2
+CONFIG_ARM_PSCI=y
+CONFIG_UACCESS_WITH_MEMCPY=y
+# CONFIG_ATAGS is not set
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_SUSPEND is not set
+CONFIG_JUMP_LABEL=y
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+# CONFIG_IPV6_SIT is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_ADVANCED is not set
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_NCSI=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_SPI_NOR=y
+# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
+CONFIG_SPI_ASPEED_SMC=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=49152
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_FTGMAC100=y
+# CONFIG_NET_VENDOR_HISILICON is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_BROADCOM_PHY=y
+CONFIG_REALTEK_PHY=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PWM_BEEPER=y
+CONFIG_INPUT_IBM_PANEL=y
+CONFIG_SERIO_RAW=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_ASPEED_VUART=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_ASPEED_KCS_IPMI_BMC=y
+CONFIG_IPMI_KCS_BMC_CDEV_IPMI=y
+CONFIG_IPMI_KCS_BMC_SERIO=y
+CONFIG_IPMI_KCS_BMC_CDEV_RAW=y
+CONFIG_IPMB_DEVICE_INTERFACE=y
+CONFIG_HW_RANDOM_TIMERIOMEM=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX_GPIO=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_ASPEED=y
+CONFIG_I2C_SLAVE=y
+CONFIG_I2C_SLAVE_MQUEUE_MESSAGE_SIZE=256
+CONFIG_I2C_SLAVE_MQUEUE=y
+CONFIG_I3C=y
+CONFIG_I3CDEV=y
+CONFIG_DW_I3C_MASTER=y
+CONFIG_SPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_ASPEED=y
+CONFIG_GPIO_ASPEED_SGPIO=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_W1=y
+CONFIG_W1_MASTER_GPIO=y
+CONFIG_W1_SLAVE_THERM=y
+CONFIG_SENSORS_ASPEED_G6=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_LM75=y
+CONFIG_SENSORS_NCT7904=y
+CONFIG_SENSORS_OCC_P8_I2C=y
+CONFIG_SENSORS_PECI_CPUTEMP=y
+CONFIG_SENSORS_PECI_DIMMTEMP=y
+CONFIG_SENSORS_PECI_CPUPOWER=y
+CONFIG_SENSORS_PECI_DIMMPOWER=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_ADM1275=y
+CONFIG_SENSORS_IBM_CFFPS=y
+CONFIG_SENSORS_IR35221=y
+CONFIG_SENSORS_IR38064=y
+CONFIG_SENSORS_ISL68137=y
+CONFIG_SENSORS_LM25066=y
+CONFIG_SENSORS_MAX31785=y
+CONFIG_SENSORS_UCD9000=y
+CONFIG_SENSORS_UCD9200=y
+CONFIG_SENSORS_TMP421=y
+CONFIG_SENSORS_W83773G=y
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_SUPPORT_FILTER=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_ASPEED=y
+CONFIG_DRM=y
+CONFIG_DRM_ASPEED_GFX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ASPEED_VHUB=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_MASS_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ASPEED=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PCA955X=y
+CONFIG_LEDS_PCA955X_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_PCF8523=y
+CONFIG_RTC_DRV_PCHC620=y
+CONFIG_RTC_DRV_RV8803=y
+CONFIG_RTC_DRV_ASPEED=y
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ASPEED_ESPI_SLAVE=y
+# CONFIG_ASPEED_LPC_CTRL is not set
+CONFIG_ASPEED_LPC_MBOX=y
+CONFIG_ASPEED_LPC_SIO=y
+CONFIG_ASPEED_MCTP=y
+# CONFIG_ASPEED_P2A_CTRL is not set
+CONFIG_ASPEED_XDMA=y
+CONFIG_ASPEED_VGA_SHAREDMEM=y
+CONFIG_IIO=y
+CONFIG_ASPEED_ADC=y
+CONFIG_MAX1363=y
+CONFIG_BMP280=y
+CONFIG_DPS310=y
+CONFIG_PWM=y
+CONFIG_PWM_FTTMR010=y
+CONFIG_RAS=y
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_PECI=y
+CONFIG_PECI_CHARDEV=y
+CONFIG_PECI_ASPEED=y
+CONFIG_PECI_MCTP=y
+CONFIG_JTAG=y
+CONFIG_JTAG_ASPEED=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_OVERLAY_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+# CONFIG_JFFS2_FS_WRITEBUFFER is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_CIFS=y
+CONFIG_CIFS_XATTR=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_CRYPTO_USER_API_HASH=y
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_WX=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+CONFIG_WQ_WATCHDOG=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_LIST=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_DEBUG_USER=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 0d399ddaa185..279801e4b14e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -236,4 +236,8 @@ source "drivers/interconnect/Kconfig"
source "drivers/counter/Kconfig"
source "drivers/most/Kconfig"
+
+source "drivers/peci/Kconfig"
+
+source "drivers/jtag/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index a110338c860c..279112934367 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -187,3 +187,5 @@ obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
obj-$(CONFIG_COUNTER) += counter/
obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_PECI) += peci/
+obj-$(CONFIG_JTAG_ASPEED) += jtag/
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 7450904e330a..fddd12a75d3f 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -5,6 +5,7 @@
#include <linux/atomic.h>
#include <linux/bt-bmc.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -58,6 +59,7 @@ struct bt_bmc {
struct device dev;
struct miscdevice miscdev;
void __iomem *base;
+ struct clk *clk;
int irq;
wait_queue_head_t queue;
struct timer_list poll_timer;
@@ -426,6 +428,16 @@ static int bt_bmc_probe(struct platform_device *pdev)
mutex_init(&bt_bmc->mutex);
init_waitqueue_head(&bt_bmc->queue);
+ bt_bmc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(bt_bmc->clk))
+ return dev_err_probe(dev, PTR_ERR(bt_bmc->clk),
+ "couldn't get clock\n");
+ rc = clk_prepare_enable(bt_bmc->clk);
+ if (rc) {
+ dev_err(dev, "couldn't enable clock\n");
+ return rc;
+ }
+
bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
bt_bmc->miscdev.name = DEVICE_NAME;
bt_bmc->miscdev.fops = &bt_bmc_fops;
@@ -433,7 +445,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
rc = misc_register(&bt_bmc->miscdev);
if (rc) {
dev_err(dev, "Unable to register misc device\n");
- return rc;
+ goto err;
}
bt_bmc_config_irq(bt_bmc, pdev);
@@ -457,6 +469,11 @@ static int bt_bmc_probe(struct platform_device *pdev)
clr_b_busy(bt_bmc);
return 0;
+
+err:
+ clk_disable_unprepare(bt_bmc->clk);
+
+ return rc;
}
static int bt_bmc_remove(struct platform_device *pdev)
@@ -466,6 +483,8 @@ static int bt_bmc_remove(struct platform_device *pdev)
misc_deregister(&bt_bmc->miscdev);
if (bt_bmc->irq < 0)
del_timer_sync(&bt_bmc->poll_timer);
+ clk_disable_unprepare(bt_bmc->clk);
+
return 0;
}
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 49b8f22fdcf0..23c38589fa5c 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -328,8 +328,12 @@ static int ipmb_probe(struct i2c_client *client,
if (ret)
return ret;
+#if 1 /* FIXME: Quick fix. Need to add parsing code for ipmb_id instead */
+ ipmb_dev->is_i2c_protocol = true;
+#else
ipmb_dev->is_i2c_protocol
= device_property_read_bool(&client->dev, "i2c-protocol");
+#endif
ipmb_dev->client = client;
i2c_set_clientdata(client, ipmb_dev);
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index b555286016b1..fc09e9c164ce 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2015-2018, Intel Corporation.
- */
+// Copyright (c) 2015-2019, Intel Corporation.
#define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt
#include <linux/atomic.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -115,6 +114,7 @@ struct aspeed_kcs_bmc {
struct kcs_bmc_device kcs_bmc;
struct regmap *map;
+ struct clk *clk;
struct {
enum aspeed_kcs_irq_mode mode;
@@ -620,24 +620,34 @@ static int aspeed_kcs_probe(struct platform_device *pdev)
return -ENODEV;
}
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "couldn't get clock\n");
+ rc = clk_prepare_enable(priv->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "couldn't enable clock\n");
+ return rc;
+ }
+
spin_lock_init(&priv->obe.lock);
priv->obe.remove = false;
timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);
rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
if (rc)
- return rc;
+ goto err;
/* Host to BMC IRQ */
rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
if (rc)
- return rc;
+ goto err;
/* BMC to Host IRQ */
if (have_upstream_irq) {
rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
if (rc < 0)
- return rc;
+ goto err;
} else {
priv->upstream_irq.mode = aspeed_kcs_irq_none;
}
@@ -650,13 +660,19 @@ static int aspeed_kcs_probe(struct platform_device *pdev)
rc = kcs_bmc_add_device(&priv->kcs_bmc);
if (rc) {
dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
- return rc;
+ goto err;
}
dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
kcs_bmc->channel, addrs[0]);
return 0;
+
+err:
+ aspeed_kcs_enable_channel(kcs_bmc, false);
+ clk_disable_unprepare(priv->clk);
+
+ return rc;
}
static int aspeed_kcs_remove(struct platform_device *pdev)
@@ -664,6 +680,7 @@ static int aspeed_kcs_remove(struct platform_device *pdev)
struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
+ clk_disable_unprepare(priv->clk);
kcs_bmc_remove_device(kcs_bmc);
aspeed_kcs_enable_channel(kcs_bmc, false);
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 411ff5fb2c07..5e1c87bc8a99 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -14,7 +14,9 @@
#include "clk-aspeed.h"
-#define ASPEED_NUM_CLKS 38
+#define ASPEED_NUM_CLKS ASPEED_CLK_MAX
+#define UART_HIGH_SPEED_CLK 192000000
+#define UART_LOW_SPEED_CLK 24000000
#define ASPEED_RESET2_OFFSET 32
@@ -29,6 +31,12 @@
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
#define ASPEED_MAC_CLK_DLY 0x48
+#define ASPEED_MISC2_CTRL 0x4c
+#define UART1_HS_CLK_EN BIT(24)
+#define UART2_HS_CLK_EN BIT(25)
+#define UART3_HS_CLK_EN BIT(26)
+#define UART4_HS_CLK_EN BIT(27)
+#define UART5_HS_CLK_EN BIT(28)
#define ASPEED_STRAP 0x70
#define CLKIN_25MHZ_EN BIT(23)
#define AST2400_CLK_SOURCE_SEL BIT(18)
@@ -386,7 +394,7 @@ static int aspeed_clk_probe(struct platform_device *pdev)
struct aspeed_reset *ar;
struct regmap *map;
struct clk_hw *hw;
- u32 val, rate;
+ u32 val, rate, rate_hi;
int i, ret;
map = syscon_node_to_regmap(dev->of_node);
@@ -420,16 +428,25 @@ static int aspeed_clk_probe(struct platform_device *pdev)
/* UART clock div13 setting */
regmap_read(map, ASPEED_MISC_CTRL, &val);
- if (val & UART_DIV13_EN)
- rate = 24000000 / 13;
- else
- rate = 24000000;
+ if (val & UART_DIV13_EN) {
+ rate = UART_LOW_SPEED_CLK / 13;
+ rate_hi = UART_HIGH_SPEED_CLK / 13;
+ } else {
+ rate = UART_LOW_SPEED_CLK;
+ rate_hi = UART_HIGH_SPEED_CLK;
+ }
/* TODO: Find the parent data for the uart clock */
hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_UART] = hw;
+ hw = clk_hw_register_fixed_rate(dev, "uart-hs", "usb-port1-gate", 0,
+ rate_hi);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_UART_HS] = hw;
+
/*
* Memory controller (M-PLL) PLL. This clock is configured by the
* bootloader, and is exposed to Linux as a read-only clock rate.
@@ -539,9 +556,22 @@ static int aspeed_clk_probe(struct platform_device *pdev)
* UART[1..5] clock source mux
*/
+ /* Get the UART clock source configuration from SCU4C */
+ regmap_read(map, ASPEED_MISC2_CTRL, &val);
for (i = 0; i < ARRAY_SIZE(aspeed_gates); i++) {
const struct aspeed_gate_data *gd = &aspeed_gates[i];
u32 gate_flags;
+ char *parent_name;
+
+ /* For uart, needs to adjust the clock based on SCU4C value */
+ if ((i == ASPEED_CLK_GATE_UART1CLK && (val & UART1_HS_CLK_EN)) ||
+ (i == ASPEED_CLK_GATE_UART2CLK && (val & UART2_HS_CLK_EN)) ||
+ (i == ASPEED_CLK_GATE_UART5CLK && (val & UART5_HS_CLK_EN)) ||
+ (i == ASPEED_CLK_GATE_UART3CLK && (val & UART3_HS_CLK_EN)) ||
+ (i == ASPEED_CLK_GATE_UART4CLK && (val & UART4_HS_CLK_EN)))
+ parent_name = "uart-hs";
+ else
+ parent_name = gd->parent_name;
/* Special case: the USB port 1 clock (bit 14) is always
* working the opposite way from the other ones.
@@ -549,7 +579,7 @@ static int aspeed_clk_probe(struct platform_device *pdev)
gate_flags = (gd->clock_idx == 14) ? 0 : CLK_GATE_SET_TO_DISABLE;
hw = aspeed_clk_hw_register_gate(dev,
gd->name,
- gd->parent_name,
+ parent_name,
gd->flags,
map,
gd->clock_idx,
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 6772628c9103..b9395da8bcf7 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -15,7 +15,7 @@
#include "clk-aspeed.h"
-#define ASPEED_G6_NUM_CLKS 71
+#define ASPEED_G6_NUM_CLKS ASPEED_CLK_MAX
#define ASPEED_G6_SILICON_REV 0x014
#define CHIP_REVISION_ID GENMASK(23, 16)
@@ -32,6 +32,24 @@
#define ASPEED_G6_CLK_SELECTION1 0x300
#define ASPEED_G6_CLK_SELECTION2 0x304
#define ASPEED_G6_CLK_SELECTION4 0x310
+#define ASPEED_G6_CLK_SELECTION5 0x314
+
+#define ASPEED_G6_MAC12_CLK_CTRL0 0x340
+#define ASPEED_G6_MAC12_CLK_CTRL1 0x348
+#define ASPEED_G6_MAC12_CLK_CTRL2 0x34C
+
+#define ASPEED_G6_MAC34_CLK_CTRL0 0x350
+#define ASPEED_G6_MAC34_CLK_CTRL1 0x358
+#define ASPEED_G6_MAC34_CLK_CTRL2 0x35C
+
+#define ASPEED_G6_MAC34_DRIVING_CTRL 0x458
+
+#define ASPEED_G6_DEF_MAC12_DELAY_1G 0x0041b410
+#define ASPEED_G6_DEF_MAC12_DELAY_100M 0x00417410
+#define ASPEED_G6_DEF_MAC12_DELAY_10M 0x00417410
+#define ASPEED_G6_DEF_MAC34_DELAY_1G 0x00104208
+#define ASPEED_G6_DEF_MAC34_DELAY_100M 0x00104208
+#define ASPEED_G6_DEF_MAC34_DELAY_10M 0x00104208
#define ASPEED_HPLL_PARAM 0x200
#define ASPEED_APLL_PARAM 0x210
@@ -41,8 +59,8 @@
#define ASPEED_G6_STRAP1 0x500
-#define ASPEED_MAC12_CLK_DLY 0x340
-#define ASPEED_MAC34_CLK_DLY 0x350
+#define ASPEED_G6_GEN_UART_REF 0x338
+#define UART_192MHZ_R_N_VALUE 0x3c38e
/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_g6_clk_lock);
@@ -79,7 +97,7 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
/* Reserved 11/12 */
[ASPEED_CLK_GATE_YCLK] = { 13, 4, "yclk-gate", NULL, 0 }, /* HAC */
[ASPEED_CLK_GATE_USBPORT1CLK] = { 14, 14, "usb-port1-gate", NULL, 0 }, /* USB2 hub/USB2 host port 1/USB1.1 dev */
- [ASPEED_CLK_GATE_UART5CLK] = { 15, -1, "uart5clk-gate", "uart", 0 }, /* UART5 */
+ [ASPEED_CLK_GATE_UART5CLK] = { 15, -1, "uart5clk-gate", "uart5", 0 }, /* UART5 */
/* Reserved 16/19 */
[ASPEED_CLK_GATE_MAC1CLK] = { 20, 11, "mac1clk-gate", "mac12", 0 }, /* MAC1 */
[ASPEED_CLK_GATE_MAC2CLK] = { 21, 12, "mac2clk-gate", "mac12", 0 }, /* MAC2 */
@@ -90,21 +108,21 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
[ASPEED_CLK_GATE_EMMCCLK] = { 27, 16, "emmcclk-gate", NULL, 0 }, /* For card clk */
/* Reserved 28/29/30 */
[ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, 0 }, /* LPC */
- [ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, 0 }, /* eSPI */
+ [ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, CLK_IS_CRITICAL }, /* eSPI */
[ASPEED_CLK_GATE_REF1CLK] = { 34, -1, "ref1clk-gate", "clkin", CLK_IS_CRITICAL },
/* Reserved 35 */
[ASPEED_CLK_GATE_SDCLK] = { 36, 56, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
[ASPEED_CLK_GATE_LHCCLK] = { 37, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
/* Reserved 38 RSA: no longer used */
/* Reserved 39 */
- [ASPEED_CLK_GATE_I3C0CLK] = { 40, 40, "i3c0clk-gate", NULL, 0 }, /* I3C0 */
- [ASPEED_CLK_GATE_I3C1CLK] = { 41, 41, "i3c1clk-gate", NULL, 0 }, /* I3C1 */
- [ASPEED_CLK_GATE_I3C2CLK] = { 42, 42, "i3c2clk-gate", NULL, 0 }, /* I3C2 */
- [ASPEED_CLK_GATE_I3C3CLK] = { 43, 43, "i3c3clk-gate", NULL, 0 }, /* I3C3 */
- [ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", NULL, 0 }, /* I3C4 */
- [ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", NULL, 0 }, /* I3C5 */
- [ASPEED_CLK_GATE_I3C6CLK] = { 46, 46, "i3c6clk-gate", NULL, 0 }, /* I3C6 */
- [ASPEED_CLK_GATE_I3C7CLK] = { 47, 47, "i3c7clk-gate", NULL, 0 }, /* I3C7 */
+ [ASPEED_CLK_GATE_I3C0CLK] = { 40, 40, "i3c0clk-gate", "i3cclk", 0 }, /* I3C0 */
+ [ASPEED_CLK_GATE_I3C1CLK] = { 41, 41, "i3c1clk-gate", "i3cclk", 0 }, /* I3C1 */
+ [ASPEED_CLK_GATE_I3C2CLK] = { 42, 42, "i3c2clk-gate", "i3cclk", 0 }, /* I3C2 */
+ [ASPEED_CLK_GATE_I3C3CLK] = { 43, 43, "i3c3clk-gate", "i3cclk", 0 }, /* I3C3 */
+ [ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", "i3cclk", 0 }, /* I3C4 */
+ [ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", "i3cclk", 0 }, /* I3C5 */
+ [ASPEED_CLK_GATE_I3C6CLK] = { 46, 46, "i3c6clk-gate", "i3cclk", 0 }, /* I3C6 */
+ [ASPEED_CLK_GATE_I3C7CLK] = { 47, 47, "i3c7clk-gate", "i3cclk", 0 }, /* I3C7 */
[ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */
[ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */
[ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
@@ -472,17 +490,26 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return ret;
}
- /* UART clock div13 setting */
- regmap_read(map, ASPEED_G6_MISC_CTRL, &val);
- if (val & UART_DIV13_EN)
- rate = 24000000 / 13;
+ /* UART clock setting */
+ regmap_read(map, ASPEED_G6_GEN_UART_REF, &val);
+ if (val == UART_192MHZ_R_N_VALUE) {
+ rate = 192000000 / 13;
+ dev_info(dev, "192MHz UART reference clock, max baud rate 921600\n");
+ }
else
- rate = 24000000;
+ rate = 24000000 / 13;
hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_UART] = hw;
+ /* UART5 clock setting */
+ rate = 24000000 / 13;
+ hw = clk_hw_register_fixed_rate(dev, "uart5", NULL, 0, rate);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_UART5] = hw;
+
/* UART6~13 clock div13 setting */
regmap_read(map, 0x80, &val);
if (val & BIT(31))
@@ -554,7 +581,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
/* RMII1 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
- scu_g6_base + ASPEED_MAC12_CLK_DLY, 29, 0,
+ scu_g6_base + ASPEED_G6_MAC12_CLK_CTRL0, 29, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
@@ -562,7 +589,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
/* RMII2 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
- scu_g6_base + ASPEED_MAC12_CLK_DLY, 30, 0,
+ scu_g6_base + ASPEED_G6_MAC12_CLK_CTRL0, 30, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
@@ -584,7 +611,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
/* RMII3 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac3rclk", "mac34rclk", 0,
- scu_g6_base + ASPEED_MAC34_CLK_DLY, 29, 0,
+ scu_g6_base + ASPEED_G6_MAC34_CLK_CTRL0, 29, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
@@ -592,7 +619,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
/* RMII4 50MHz (RCLK) output enable */
hw = clk_hw_register_gate(dev, "mac4rclk", "mac34rclk", 0,
- scu_g6_base + ASPEED_MAC34_CLK_DLY, 30, 0,
+ scu_g6_base + ASPEED_G6_MAC34_CLK_CTRL0, 30, 0,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
@@ -772,6 +799,20 @@ static void __init aspeed_g6_cc(struct regmap *map)
/* USB 2.0 port1 phy 40MHz clock */
hw = clk_hw_register_fixed_rate(NULL, "usb-phy-40m", NULL, 0, 40000000);
aspeed_g6_clk_data->hws[ASPEED_CLK_USBPHY_40M] = hw;
+
+ /* i3c clock source */
+ regmap_read(map, ASPEED_G6_CLK_SELECTION5, &val);
+ if (val & BIT(31)) {
+ val = (val >> 28) & 0x7;
+ if (val)
+ div = val + 1;
+ else
+ div = val + 2;
+ hw = clk_hw_register_fixed_factor(NULL, "i3cclk", "apll", 0, 1, div);
+ } else {
+ hw = clk_hw_register_fixed_factor(NULL, "i3cclk", "ahb", 0, 1, 1);
+ }
+ aspeed_g6_clk_data->hws[ASPEED_CLK_I3C] = hw;
};
static void __init aspeed_g6_cc_init(struct device_node *np)
@@ -810,6 +851,37 @@ static void __init aspeed_g6_cc_init(struct device_node *np)
return;
}
+ /* fixed settings for RGMII/RMII clock generator */
+ /* MAC1/2 RGMII 125MHz = EPLL / 8 */
+ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION2, GENMASK(23, 20),
+ (0x7 << 20));
+
+ /* MAC3/4 RMII 50MHz = HCLK / 4 */
+ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION4, GENMASK(18, 16),
+ (0x3 << 16));
+
+ /*
+ * BIT[31]: MAC1/2 RGMII 125M source = internal PLL
+ * BIT[28]: RGMIICK pad direction = output
+ */
+ regmap_write(map, ASPEED_G6_MAC12_CLK_CTRL0,
+ BIT(31) | BIT(28) | ASPEED_G6_DEF_MAC12_DELAY_1G);
+ regmap_write(map, ASPEED_G6_MAC12_CLK_CTRL1,
+ ASPEED_G6_DEF_MAC12_DELAY_100M);
+ regmap_write(map, ASPEED_G6_MAC12_CLK_CTRL2,
+ ASPEED_G6_DEF_MAC12_DELAY_10M);
+
+ /* MAC3/4 RGMII 125M source = RGMIICK pad */
+ regmap_write(map, ASPEED_G6_MAC34_CLK_CTRL0,
+ ASPEED_G6_DEF_MAC34_DELAY_1G);
+ regmap_write(map, ASPEED_G6_MAC34_CLK_CTRL1,
+ ASPEED_G6_DEF_MAC34_DELAY_100M);
+ regmap_write(map, ASPEED_G6_MAC34_CLK_CTRL2,
+ ASPEED_G6_DEF_MAC34_DELAY_10M);
+
+ /* MAC3/4 default pad driving strength */
+ regmap_write(map, ASPEED_G6_MAC34_DRIVING_CTRL, 0x0000000a);
+
aspeed_g6_cc(map);
aspeed_g6_clk_data->num = ASPEED_G6_NUM_CLKS;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_g6_clk_data);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index ccdaeafed0bb..04f164c2aa98 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -397,6 +397,17 @@ config SENSORS_ASPEED
This driver can also be built as a module. If so, the module
will be called aspeed_pwm_tacho.
+config SENSORS_ASPEED_G6
+ tristate "ASPEED AST2600 PWM and Fan tach driver"
+ depends on THERMAL || THERMAL=n
+ select REGMAP
+ help
+ This driver provides support for ASPEED AST2600 PWM
+ and Fan Tacho controllers.
+
+ This driver can also be built as a module. If so, the module
+ will be called aspeed_g6_pwm_tacho.
+
config SENSORS_ATXP1
tristate "Attansic ATXP1 VID controller"
depends on I2C
@@ -1517,6 +1528,62 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config SENSORS_PECI_CPUTEMP
+ tristate "PECI CPU temperature monitoring client"
+ depends on PECI
+ select MFD_INTEL_PECI_CLIENT
+ help
+ If you say yes here you get support for the generic Intel PECI
+ cputemp driver which provides Digital Thermal Sensor (DTS) thermal
+ readings of the CPU package and CPU cores that are accessible using
+ the PECI Client Command Suite via the processor PECI client.
+ Check <file:Documentation/hwmon/peci-cputemp.rst> for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-cputemp.
+
+config SENSORS_PECI_DIMMTEMP
+ tristate "PECI DIMM temperature monitoring client"
+ depends on PECI
+ select MFD_INTEL_PECI_CLIENT
+ help
+ If you say yes here you get support for the generic Intel PECI hwmon
+ driver which provides Digital Thermal Sensor (DTS) thermal readings of
+ DIMM components that are accessible using the PECI Client Command
+ Suite via the processor PECI client.
+ Check <file:Documentation/hwmon/peci-dimmtemp.rst> for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-dimmtemp.
+
+config SENSORS_PECI_CPUPOWER
+ tristate "PECI CPU power monitoring support"
+ depends on PECI
+ select MFD_INTEL_PECI_CLIENT
+ help
+ If you say yes here you get support for the generic Intel PECI
+ cpupower driver which provides average energy readings of the CPU package,
+ the current package power limit, and the maximum (TDP) and minimum power
+ settings, using the PECI Client Command Suite via the processor PECI client.
+ Check <file:Documentation/hwmon/peci-cpupower.rst> for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-cpupower.
+
+config SENSORS_PECI_DIMMPOWER
+ tristate "PECI DIMM power monitoring support"
+ depends on PECI
+ select MFD_INTEL_PECI_CLIENT
+ help
+ If you say yes here you get support for the generic Intel PECI
+ dimmpower driver which provides average energy readings of the memory
+ package, the current power limit, and the maximum and minimum power
+ settings, using the PECI Client Command Suite via the processor PECI client.
+ Check <file:Documentation/hwmon/peci-dimmpower.rst> for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called peci-dimmpower.
+
source "drivers/hwmon/pmbus/Kconfig"
config SENSORS_PWM_FAN
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 162940270661..631ff622c263 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
obj-$(CONFIG_SENSORS_AS370) += as370-hwmon.o
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o
+obj-$(CONFIG_SENSORS_ASPEED_G6) += aspeed-g6-pwm-tacho.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o
obj-$(CONFIG_SENSORS_BT1_PVT) += bt1-pvt.o
@@ -159,6 +160,10 @@ obj-$(CONFIG_SENSORS_NZXT_KRAKEN2) += nzxt-kraken2.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
+obj-$(CONFIG_SENSORS_PECI_CPUTEMP) += peci-cputemp.o
+obj-$(CONFIG_SENSORS_PECI_DIMMTEMP) += peci-dimmtemp.o
+obj-$(CONFIG_SENSORS_PECI_CPUPOWER) += peci-cpupower.o
+obj-$(CONFIG_SENSORS_PECI_DIMMPOWER) += peci-dimmpower.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
diff --git a/drivers/hwmon/aspeed-g6-pwm-tacho.c b/drivers/hwmon/aspeed-g6-pwm-tacho.c
new file mode 100644
index 000000000000..f9bfc83b32fe
--- /dev/null
+++ b/drivers/hwmon/aspeed-g6-pwm-tacho.c
@@ -0,0 +1,1163 @@
+/*
+ * Copyright (C) ASPEED Technology Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 or later as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+#include <linux/pwm.h>
+
+#define ASPEED_PWM_CTRL 0x00 //PWM0 General Register
+#define ASPEED_PWM_CTRL_CH(x) (((x) * 0x10) + 0x00)
+#define PWM_LOAD_AS_WDT BIT(19) //load selection as WDT
+#define PWM_DUTY_LOAD_AS_WDT_EN BIT(18) //enable PWM duty load as WDT
+#define PWM_DUTY_SYNC_DIS BIT(17) //disable PWM duty sync
+#define PWM_CLK_ENABLE BIT(16) //enable PWM clock
+#define PWM_LEVEL_OUTPUT BIT(15) //output PWM level
+#define PWM_INVERSE BIT(14) //inverse PWM pin
+#define PWM_OPEN_DRAIN_EN BIT(13) //enable open-drain
+#define PWM_PIN_EN BIT(12) //enable PWM pin
+#define PWM_CLK_DIV_H_MASK (0xf << 8) //PWM clock division H bit [3:0]
+#define PWM_CLK_DIV_L_MASK (0xff) //PWM clock division L bit [7:0]
+
+/*
+\xregmid {11:8 }{RW}{PWM clock division H bit [3:0]}{
+ 0: divide 1 \n
+ 1: divide 2 \n
+ 2: divide 4 \n
+ 3: divide 8 \n
+ ... \n
+ F: divide 32768}
+\xregmid {7 :0 }{RW}{PWM clock division L bit [7:0]}{
+ 00: divide 1 \n
+ 01: divide 2 \n
+ 02: divide 3 \n
+ 03: divide 4 \n
+ ... \n
+ FF: divide 256}
+*/
+
+#define ASPEED_PWM_DUTY_CYCLE 0x04 //PWM0 Duty Cycle Register
+#define ASPEED_PWM_DUTY_CYCLE_CH(x) (((x) * 0x10) + 0x04)
+#define PWM_LOOP_BIT_MASK (0xf << 24) //loop bit [7:0]
+#define PWM_PERIOD_BIT (24) //pwm period bit [7:0]
+#define PWM_PERIOD_BIT_MASK (0xff << 24) //pwm period bit [7:0]
+#define PWM_RISING_FALLING_AS_WDT_BIT (16)
+#define PWM_RISING_FALLING_AS_WDT_MASK (0xff << 16) //pwm rising/falling point bit [7:0] as WDT
+#define PWM_RISING_FALLING_MASK (0xffff)
+#define PWM_RISING_FALLING_BIT (8) //pwm falling point bit [7:0]
+#define PWM_RISING_RISING_BIT (0) //pwm rising point bit [7:0]
+
+#define PWM_PERIOD_MAX 255
+#define PWM_FALLING_DEFAULT 255 /* 100% */
+
+#define ASPEED_TACHO_CTRL 0x08 //TACH0 General Register
+#define ASPEED_TACHO_CTRL_CH(x) (((x) * 0x10) + 0x08)
+#define TACHO_IER BIT(31) //enable tacho interrupt
+#define TACHO_INVERS_LIMIT BIT(30) //inverse tacho limit comparison
+#define TACHO_LOOPBACK BIT(29) //tacho loopback
+#define TACHO_ENABLE BIT(28) //enable tacho
+#define TACHO_DEBOUNCE_BIT (26) //tacho de-bounce
+#define TACHO_DEBOUNCE_MASK (0x3 << 26) //tacho de-bounce
+#define TACHIO_EDGE_BIT (24) //tacho edge
+#define TACHO_CLK_DIV_T_MASK (0xf << 20)
+#define TACHO_CLK_DIV_BIT (20)
+#define TACHO_THRESHOLD_MASK (0xfffff) //tacho threshold bit
+/*
+\xregmid {23:20}{RW}{tacho clock division T bit [3:0]}{
+ 0: divide 1 \n
+ 1: divide 4 \n
+ 2: divide 16 \n
+ 3: divide 64 \n
+ ... \n
+ B: divide 4194304 \n
+ others: reserved}
+\xregmidb{19 :0 }{RW}{tacho threshold bit [19:0]}
+*/
+
+#define ASPEED_TACHO_STS 0x0C //TACH0 Status Register
+#define ASPEED_TACHO_STS_CH(x) (((x) * 0x10) + 0x0C)
+#define TACHO_ISR BIT(31) //interrupt status and clear
+#define PWM_OUT BIT(25) //pwm_out
+#define PWM_OEN BIT(24) //pwm_oeN
+#define TACHO_DEB_INPUT BIT(23) //tacho deB input
+#define TACHO_RAW_INPUT BIT(22) //tacho raw input
+#define TACHO_VALUE_UPDATE BIT(21) //tacho value updated since the last read
+#define TACHO_FULL_MEASUREMENT BIT(20) //tacho full measurement
+#define TACHO_VALUE_MASK 0xfffff //tacho value bit [19:0]
+
+#define MAX_CDEV_NAME_LEN 16
+
+#define DEFAULT_TARGET_PWM_FREQ 25000
+#define DEFAULT_MIN_RPM 2900
+
+struct aspeed_pwm_channel_params {
+ int target_freq;
+ int pwm_freq;
+ int load_wdt_rising_falling_pt;
+ int load_wdt_selection; //0: rising , 1: falling
+ int load_wdt_enable;
+ int duty_sync_enable;
+ int invert_pin;
+ u8 rising;
+ u8 falling;
+};
+
+static struct aspeed_pwm_channel_params default_pwm_params[] = {
+ [0] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 1,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [1] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [2] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [3] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [4] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [5] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [6] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [7] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [8] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [9] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [10] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [11] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [12] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [13] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [14] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+ [15] = {
+ .target_freq = 25000,
+ .load_wdt_rising_falling_pt = 0x10,
+ .load_wdt_selection = 0,
+ .load_wdt_enable = 0,
+ .duty_sync_enable = 0,
+ .invert_pin = 0,
+ .rising = 0x00,
+ .falling = PWM_FALLING_DEFAULT,
+ },
+};
+
+/*
+ * 5:4 fan tach edge mode selection bit:
+ * 00: falling
+ * 01: rising
+ * 10: both
+ * 11: reserved.
+ */
+
+#define F2F_EDGES 0x00
+#define R2R_EDGES 0x01
+#define BOTH_EDGES 0x02
+
+struct aspeed_tacho_channel_params {
+ u32 min_rpm;
+ int limited_inverse;
+ u16 threshold;
+ u8 tacho_edge;
+ u8 tacho_debounce;
+ u32 divide;
+};
+
+
+static struct aspeed_tacho_channel_params default_tacho_params[] = {
+ [0] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [1] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [2] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [3] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [4] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [5] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [6] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [7] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [8] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [9] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [10] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [11] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [12] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [13] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [14] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+ [15] = {
+ .min_rpm = 2900,
+ .limited_inverse = 0,
+ .threshold = 0,
+ .tacho_edge = F2F_EDGES,
+ .tacho_debounce = 0,
+ .divide = 8,
+ },
+};
+
+struct aspeed_pwm_tachometer_data {
+ struct regmap *regmap;
+ unsigned long clk_freq;
+ struct reset_control *reset;
+ bool pwm_present[16];
+ bool fan_tach_present[16];
+ struct aspeed_pwm_channel_params *pwm_channel;
+ struct aspeed_tacho_channel_params *tacho_channel;
+ struct aspeed_cooling_device *cdev[8];
+ const struct attribute_group *groups[3];
+ struct pwm_chip chip;
+ u32 clk_tick_ns;
+};
+
+struct aspeed_cooling_device {
+ char name[16];
+ struct aspeed_pwm_tachometer_data *priv;
+ struct thermal_cooling_device *tcdev;
+ int pwm_channel;
+ u8 *cooling_levels;
+ u8 max_state;
+ u8 cur_state;
+};
+
+struct aspeed_pwm_output_chan {
+ u32 period_ns;
+ u32 duty_ns;
+};
+
+static int regmap_aspeed_pwm_tachometer_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ void __iomem *regs = (void __iomem *)context;
+
+ writel(val, regs + reg);
+ return 0;
+}
+
+static int regmap_aspeed_pwm_tachometer_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ void __iomem *regs = (void __iomem *)context;
+
+ *val = readl(regs + reg);
+ return 0;
+}
+
+static const struct regmap_config aspeed_pwm_tachometer_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x100,
+ .reg_write = regmap_aspeed_pwm_tachometer_reg_write,
+ .reg_read = regmap_aspeed_pwm_tachometer_reg_read,
+ .fast_io = true,
+};
+
+static void aspeed_set_pwm_channel_enable(struct regmap *regmap, u8 pwm_channel,
+ bool enable)
+{
+ regmap_update_bits(regmap, ASPEED_PWM_CTRL_CH(pwm_channel),
+ (PWM_CLK_ENABLE | PWM_PIN_EN),
+ enable ? (PWM_CLK_ENABLE | PWM_PIN_EN) : 0);
+}
+
+static void aspeed_set_fan_tach_ch_enable(struct aspeed_pwm_tachometer_data *priv, u8 fan_tach_ch,
+ bool enable)
+{
+ u32 i;
+ u32 divide_val = 0;
+ u32 target_div;
+ u32 reg_value = 0;
+
+ if (enable) {
+ /*RPM calculation as per ast2600 datasheet*/
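+ /*
+ * Pick the smallest power-of-4 tacho clock divider (the hardware
+ * steps are 1, 4, 16, ...) that keeps the measurement for a fan
+ * spinning at min_rpm within the 20-bit tacho counter.
+ */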
+ target_div = (priv->clk_freq * 60 / priv->tacho_channel[fan_tach_ch].min_rpm * 2) / (0xfffff + 1);
+ if (target_div) {
+ for (i = 0; i < 12; i++) {
+ divide_val = BIT(i) * BIT(i);
+ if (divide_val > target_div)
+ break;
+ }
+ } else {
+ i = 0;
+ divide_val = 1;
+ }
+ priv->tacho_channel[fan_tach_ch].divide = divide_val;
+
+ reg_value = TACHO_ENABLE |
+ (priv->tacho_channel[fan_tach_ch].tacho_edge << TACHIO_EDGE_BIT) |
+ (i << TACHO_CLK_DIV_BIT) |
+ (priv->tacho_channel[fan_tach_ch].tacho_debounce << TACHO_DEBOUNCE_BIT);
+
+ if (priv->tacho_channel[fan_tach_ch].limited_inverse)
+ reg_value |= TACHO_INVERS_LIMIT;
+
+ if (priv->tacho_channel[fan_tach_ch].threshold)
+ reg_value |= (TACHO_IER | priv->tacho_channel[fan_tach_ch].threshold);
+
+ regmap_write(priv->regmap, ASPEED_TACHO_CTRL_CH(fan_tach_ch), reg_value);
+ } else
+ regmap_update_bits(priv->regmap, ASPEED_TACHO_CTRL_CH(fan_tach_ch), TACHO_ENABLE, 0);
+}
+
+static void aspeed_set_pwm_channel_fan_ctrl(struct aspeed_pwm_tachometer_data *priv,
+ u8 index, u8 fan_ctrl)
+{
+ u32 duty_value, ctrl_value;
+ u32 div_h, div_l, cal_freq;
+
+ if (fan_ctrl == 0) {
+ aspeed_set_pwm_channel_enable(priv->regmap, index, false);
+ } else {
+ cal_freq = priv->clk_freq / (PWM_PERIOD_MAX + 1);
+ /* calculate dividers for the target frequency */
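+ /*
+ * The resulting PWM frequency is clk_freq / ((PWM_PERIOD_MAX + 1) *
+ * 2^div_h * (div_l + 1)); take the first divider pair that brings it
+ * below the requested target frequency.
+ */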
+ for (div_l = 0; div_l < 0x100; div_l++) {
+ for (div_h = 0; div_h < 0x10; div_h++) {
+ if ((cal_freq / (BIT(div_h) * (div_l + 1))) < priv->pwm_channel[index].target_freq)
+ break;
+ }
+ if ((cal_freq / (BIT(div_h) * (div_l + 1))) < priv->pwm_channel[index].target_freq)
+ break;
+ }
+
+ priv->pwm_channel[index].pwm_freq = cal_freq / (BIT(div_h) * (div_l + 1));
+
+ ctrl_value = (div_h << 8) | div_l;
+
+ duty_value = (PWM_PERIOD_MAX << PWM_PERIOD_BIT) |
+ (0 << PWM_RISING_RISING_BIT) | (fan_ctrl << PWM_RISING_FALLING_BIT);
+
+ if (priv->pwm_channel[index].load_wdt_enable) {
+ ctrl_value |= PWM_DUTY_LOAD_AS_WDT_EN;
+ if (priv->pwm_channel[index].load_wdt_selection) {
+ ctrl_value |= PWM_LOAD_AS_WDT;
+ duty_value |= (priv->pwm_channel[index].load_wdt_rising_falling_pt << PWM_RISING_FALLING_AS_WDT_BIT);
+ } else {
+ duty_value |= (priv->pwm_channel[index].load_wdt_rising_falling_pt << PWM_RISING_FALLING_AS_WDT_BIT);
+ }
+ }
+
+ regmap_write(priv->regmap, ASPEED_PWM_DUTY_CYCLE_CH(index), duty_value);
+
+ regmap_write(priv->regmap, ASPEED_PWM_CTRL_CH(index), ctrl_value);
+ aspeed_set_pwm_channel_enable(priv->regmap, index, true);
+ }
+}
+
+static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tachometer_data *priv,
+ u8 fan_tach_ch)
+{
+ u32 raw_data, tach_div, clk_source, val;
+ u8 multiplier = 2;
+ int i, retries = 3;
+
+ for (i = 0; i < retries; i++) {
+ regmap_read(priv->regmap, ASPEED_TACHO_STS_CH(fan_tach_ch), &val);
+ if (TACHO_FULL_MEASUREMENT & val)
+ break;
+ }
+
+ raw_data = val & TACHO_VALUE_MASK;
+ if (raw_data == 0xfffff)
+ return 0;
+
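+ /*
+ * Each tacho count lasts divide / clk_freq seconds, so
+ * RPM = clk_freq * 60 / (count * divide * multiplier), where the
+ * multiplier of 2 accounts for the two tach pulses a typical fan
+ * produces per revolution.
+ */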
+ raw_data += 1;
+ tach_div = raw_data * (priv->tacho_channel[fan_tach_ch].divide) * (multiplier);
+
+ clk_source = priv->clk_freq;
+
+ if (raw_data == 0)
+ return 0;
+
+ return (clk_source / tach_div * 60);
+
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int index = sensor_attr->index;
+ int ret;
+ struct aspeed_pwm_tachometer_data *priv = dev_get_drvdata(dev);
+ long fan_ctrl;
+ u8 org_falling = priv->pwm_channel[index].falling;
+
+ ret = kstrtol(buf, 10, &fan_ctrl);
+ if (ret != 0)
+ return ret;
+
+ if (fan_ctrl < 0 || fan_ctrl > PWM_PERIOD_MAX)
+ return -EINVAL;
+
+ if (priv->pwm_channel[index].falling == fan_ctrl)
+ return count;
+
+ priv->pwm_channel[index].falling = fan_ctrl;
+
+ if (fan_ctrl == 0)
+ aspeed_set_pwm_channel_enable(priv->regmap, index, false);
+ else
+ regmap_update_bits(priv->regmap, ASPEED_PWM_DUTY_CYCLE_CH(index), GENMASK(15, 8), (fan_ctrl << PWM_RISING_FALLING_BIT));
+
+ if (org_falling == 0)
+ aspeed_set_pwm_channel_enable(priv->regmap, index, true);
+
+ return count;
+}
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int index = sensor_attr->index;
+ struct aspeed_pwm_tachometer_data *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", priv->pwm_channel[index].falling);
+}
+
+static ssize_t show_rpm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int index = sensor_attr->index;
+ int rpm;
+ struct aspeed_pwm_tachometer_data *priv = dev_get_drvdata(dev);
+
+ rpm = aspeed_get_fan_tach_ch_rpm(priv, index);
+ if (rpm < 0)
+ return rpm;
+
+ return sprintf(buf, "%d\n", rpm);
+}
+
+static umode_t pwm_is_visible(struct kobject *kobj,
+ struct attribute *a, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct aspeed_pwm_tachometer_data *priv = dev_get_drvdata(dev);
+
+ if (!priv->pwm_present[index])
+ return 0;
+ return a->mode;
+}
+
+static umode_t fan_dev_is_visible(struct kobject *kobj,
+ struct attribute *a, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct aspeed_pwm_tachometer_data *priv = dev_get_drvdata(dev);
+
+ if (!priv->fan_tach_present[index])
+ return 0;
+ return a->mode;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1, 0644,
+ show_pwm, set_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, 0644,
+ show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR(pwm3, 0644,
+ show_pwm, set_pwm, 2);
+static SENSOR_DEVICE_ATTR(pwm4, 0644,
+ show_pwm, set_pwm, 3);
+static SENSOR_DEVICE_ATTR(pwm5, 0644,
+ show_pwm, set_pwm, 4);
+static SENSOR_DEVICE_ATTR(pwm6, 0644,
+ show_pwm, set_pwm, 5);
+static SENSOR_DEVICE_ATTR(pwm7, 0644,
+ show_pwm, set_pwm, 6);
+static SENSOR_DEVICE_ATTR(pwm8, 0644,
+ show_pwm, set_pwm, 7);
+static SENSOR_DEVICE_ATTR(pwm9, 0644,
+ show_pwm, set_pwm, 8);
+static SENSOR_DEVICE_ATTR(pwm10, 0644,
+ show_pwm, set_pwm, 9);
+static SENSOR_DEVICE_ATTR(pwm11, 0644,
+ show_pwm, set_pwm, 10);
+static SENSOR_DEVICE_ATTR(pwm12, 0644,
+ show_pwm, set_pwm, 11);
+static SENSOR_DEVICE_ATTR(pwm13, 0644,
+ show_pwm, set_pwm, 12);
+static SENSOR_DEVICE_ATTR(pwm14, 0644,
+ show_pwm, set_pwm, 13);
+static SENSOR_DEVICE_ATTR(pwm15, 0644,
+ show_pwm, set_pwm, 14);
+static SENSOR_DEVICE_ATTR(pwm16, 0644,
+ show_pwm, set_pwm, 15);
+static struct attribute *pwm_dev_attrs[] = {
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_pwm3.dev_attr.attr,
+ &sensor_dev_attr_pwm4.dev_attr.attr,
+ &sensor_dev_attr_pwm5.dev_attr.attr,
+ &sensor_dev_attr_pwm6.dev_attr.attr,
+ &sensor_dev_attr_pwm7.dev_attr.attr,
+ &sensor_dev_attr_pwm8.dev_attr.attr,
+ &sensor_dev_attr_pwm9.dev_attr.attr,
+ &sensor_dev_attr_pwm10.dev_attr.attr,
+ &sensor_dev_attr_pwm11.dev_attr.attr,
+ &sensor_dev_attr_pwm12.dev_attr.attr,
+ &sensor_dev_attr_pwm13.dev_attr.attr,
+ &sensor_dev_attr_pwm14.dev_attr.attr,
+ &sensor_dev_attr_pwm15.dev_attr.attr,
+ &sensor_dev_attr_pwm16.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group pwm_dev_group = {
+ .attrs = pwm_dev_attrs,
+ .is_visible = pwm_is_visible,
+};
+
+static SENSOR_DEVICE_ATTR(fan1_input, 0444,
+ show_rpm, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, 0444,
+ show_rpm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, 0444,
+ show_rpm, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, 0444,
+ show_rpm, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_input, 0444,
+ show_rpm, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_input, 0444,
+ show_rpm, NULL, 5);
+static SENSOR_DEVICE_ATTR(fan7_input, 0444,
+ show_rpm, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_input, 0444,
+ show_rpm, NULL, 7);
+static SENSOR_DEVICE_ATTR(fan9_input, 0444,
+ show_rpm, NULL, 8);
+static SENSOR_DEVICE_ATTR(fan10_input, 0444,
+ show_rpm, NULL, 9);
+static SENSOR_DEVICE_ATTR(fan11_input, 0444,
+ show_rpm, NULL, 10);
+static SENSOR_DEVICE_ATTR(fan12_input, 0444,
+ show_rpm, NULL, 11);
+static SENSOR_DEVICE_ATTR(fan13_input, 0444,
+ show_rpm, NULL, 12);
+static SENSOR_DEVICE_ATTR(fan14_input, 0444,
+ show_rpm, NULL, 13);
+static SENSOR_DEVICE_ATTR(fan15_input, 0444,
+ show_rpm, NULL, 14);
+static SENSOR_DEVICE_ATTR(fan16_input, 0444,
+ show_rpm, NULL, 15);
+static struct attribute *fan_dev_attrs[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan6_input.dev_attr.attr,
+ &sensor_dev_attr_fan7_input.dev_attr.attr,
+ &sensor_dev_attr_fan8_input.dev_attr.attr,
+ &sensor_dev_attr_fan9_input.dev_attr.attr,
+ &sensor_dev_attr_fan10_input.dev_attr.attr,
+ &sensor_dev_attr_fan11_input.dev_attr.attr,
+ &sensor_dev_attr_fan12_input.dev_attr.attr,
+ &sensor_dev_attr_fan13_input.dev_attr.attr,
+ &sensor_dev_attr_fan14_input.dev_attr.attr,
+ &sensor_dev_attr_fan15_input.dev_attr.attr,
+ &sensor_dev_attr_fan16_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group fan_dev_group = {
+ .attrs = fan_dev_attrs,
+ .is_visible = fan_dev_is_visible,
+};
+
+static void aspeed_create_pwm_channel(struct aspeed_pwm_tachometer_data *priv,
+ u8 pwm_channel)
+{
+ priv->pwm_present[pwm_channel] = true;
+
+ //use default
+ aspeed_set_pwm_channel_fan_ctrl(priv, pwm_channel, priv->pwm_channel[pwm_channel].falling);
+}
+
+static void aspeed_create_fan_tach_channel(struct aspeed_pwm_tachometer_data *priv,
+ u8 *fan_tach_ch, int count, u32 min_rpm)
+{
+ u8 val, index;
+
+ for (val = 0; val < count; val++) {
+ index = fan_tach_ch[val];
+ priv->fan_tach_present[index] = true;
+ priv->tacho_channel[index].min_rpm = min_rpm;
+ aspeed_set_fan_tach_ch_enable(priv, index, true);
+ }
+}
+
+static int
+aspeed_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct aspeed_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->max_state;
+
+ return 0;
+}
+
+static int
+aspeed_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct aspeed_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->cur_state;
+
+ return 0;
+}
+
+static int
+aspeed_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long state)
+{
+ struct aspeed_cooling_device *cdev = tcdev->devdata;
+
+ if (state > cdev->max_state)
+ return -EINVAL;
+
+ cdev->cur_state = state;
+ cdev->priv->pwm_channel[cdev->pwm_channel].falling =
+ cdev->cooling_levels[cdev->cur_state];
+ aspeed_set_pwm_channel_fan_ctrl(cdev->priv, cdev->pwm_channel,
+ cdev->cooling_levels[cdev->cur_state]);
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops aspeed_pwm_cool_ops = {
+ .get_max_state = aspeed_pwm_cz_get_max_state,
+ .get_cur_state = aspeed_pwm_cz_get_cur_state,
+ .set_cur_state = aspeed_pwm_cz_set_cur_state,
+};
+
+static int aspeed_create_pwm_cooling(struct device *dev,
+ struct device_node *child,
+ struct aspeed_pwm_tachometer_data *priv,
+ u32 pwm_channel, u8 num_levels)
+{
+ int ret;
+ struct aspeed_cooling_device *cdev;
+
+ cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL);
+ if (!cdev->cooling_levels)
+ return -ENOMEM;
+
+ cdev->max_state = num_levels - 1;
+ ret = of_property_read_u8_array(child, "cooling-levels",
+ cdev->cooling_levels,
+ num_levels);
+ if (ret) {
+ dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
+ return ret;
+ }
+ snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%s%d", child->name, pwm_channel);
+
+ cdev->tcdev = thermal_of_cooling_device_register(child,
+ cdev->name,
+ cdev,
+ &aspeed_pwm_cool_ops);
+ if (IS_ERR(cdev->tcdev))
+ return PTR_ERR(cdev->tcdev);
+
+ cdev->priv = priv;
+ cdev->pwm_channel = pwm_channel;
+
+ priv->cdev[pwm_channel] = cdev;
+
+ return 0;
+}
+
+static int aspeed_pwm_create_fan(struct device *dev,
+ struct device_node *child,
+ struct aspeed_pwm_tachometer_data *priv)
+{
+ u8 *fan_tach_ch;
+ u32 fan_min_rpm;
+ u32 pwm_channel;
+ u32 target_pwm_freq;
+ int ret, count;
+
+ ret = of_property_read_u32(child, "reg", &pwm_channel);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(child, "aspeed,target_pwm", &target_pwm_freq);
+ if (ret)
+ target_pwm_freq = DEFAULT_TARGET_PWM_FREQ;
+
+ aspeed_create_pwm_channel(priv, (u8)pwm_channel);
+
+ ret = of_property_count_u8_elems(child, "cooling-levels");
+ if (ret > 0) {
+ ret = aspeed_create_pwm_cooling(dev, child, priv, pwm_channel,
+ ret);
+ if (ret)
+ return ret;
+ }
+
+ count = of_property_count_u8_elems(child, "aspeed,fan-tach-ch");
+ if (count < 1)
+ return -EINVAL;
+
+ fan_tach_ch = devm_kzalloc(dev, sizeof(*fan_tach_ch) * count,
+ GFP_KERNEL);
+ if (!fan_tach_ch)
+ return -ENOMEM;
+ ret = of_property_read_u8_array(child, "aspeed,fan-tach-ch",
+ fan_tach_ch, count);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(child, "aspeed,min_rpm", &fan_min_rpm);
+ if (ret)
+ fan_min_rpm = DEFAULT_MIN_RPM;
+
+ aspeed_create_fan_tach_channel(priv, fan_tach_ch, count, fan_min_rpm);
+ return 0;
+}
+
+static inline
+struct aspeed_pwm_tachometer_data *to_aspeed_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct aspeed_pwm_tachometer_data, chip);
+}
+
+static int aspeed_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct aspeed_pwm_output_chan *chan;
+
+ chan = devm_kzalloc(chip->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ pwm_set_chip_data(pwm, chan);
+
+ return 0;
+}
+
+static void aspeed_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ devm_kfree(chip->dev, pwm_get_chip_data(pwm));
+ pwm_set_chip_data(pwm, NULL);
+}
+
+static int aspeed_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct aspeed_pwm_tachometer_data *priv = to_aspeed_pwm(chip);
+
+ aspeed_set_pwm_channel_enable(priv->regmap, pwm->hwpwm, true);
+
+ return 0;
+}
+
+static void aspeed_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct aspeed_pwm_tachometer_data *priv = to_aspeed_pwm(chip);
+
+ aspeed_set_pwm_channel_enable(priv->regmap, pwm->hwpwm, false);
+}
+
+static int aspeed_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct aspeed_pwm_tachometer_data *priv = to_aspeed_pwm(chip);
+ struct aspeed_pwm_output_chan *chan = pwm_get_chip_data(pwm);
+ u8 div_h, div_l, period_value, falling_point, rising_point;
+ u32 ctrl_value, duty_value, tick_ns;
+
+ /*
+ * We currently avoid using 64bit arithmetic by using the
+ * fact that anything faster than 1Hz is easily representable
+ * by 32bits.
+ */
+ if (period_ns > NSEC_PER_SEC)
+ return -ERANGE;
+
+ if (chan->period_ns == period_ns && chan->duty_ns == duty_ns)
+ return 0;
+
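+ /*
+ * Find the smallest dividers such that PWM_PERIOD_MAX ticks of
+ * tick_ns = clk_tick_ns * 2^div_h * (div_l + 1) can cover the
+ * requested period.
+ */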
+ for (div_l = 0; div_l <= 0xff; div_l++) {
+ for (div_h = 0; div_h <= 0xf; div_h++) {
+ tick_ns = priv->clk_tick_ns * BIT(div_h) * (div_l + 1);
+ if (tick_ns * PWM_PERIOD_MAX >= period_ns)
+ break;
+ }
+ if (tick_ns * PWM_PERIOD_MAX >= period_ns)
+ break;
+ }
+
+ if (period_ns / tick_ns > PWM_PERIOD_MAX)
+ return -ERANGE;
+
+ ctrl_value = div_h << 8 | div_l;
+ period_value = period_ns / tick_ns;
+ falling_point = 0;
+ rising_point = duty_ns / tick_ns;
+ duty_value = period_value << PWM_PERIOD_BIT |
+ falling_point << PWM_RISING_RISING_BIT |
+ rising_point << PWM_RISING_FALLING_BIT;
+
+ regmap_update_bits(priv->regmap, ASPEED_PWM_DUTY_CYCLE_CH(pwm->hwpwm),
+ PWM_PERIOD_BIT_MASK | PWM_RISING_FALLING_MASK,
+ duty_value);
+ regmap_update_bits(priv->regmap, ASPEED_PWM_CTRL_CH(pwm->hwpwm),
+ PWM_CLK_DIV_H_MASK | PWM_CLK_DIV_L_MASK, ctrl_value);
+
+ chan->period_ns = period_ns;
+ chan->duty_ns = duty_ns;
+
+ return 0;
+}
+
+static const struct pwm_ops aspeed_pwm_ops = {
+ .request = aspeed_pwm_request,
+ .free = aspeed_pwm_free,
+ .enable = aspeed_pwm_enable,
+ .disable = aspeed_pwm_disable,
+ .config = aspeed_pwm_config,
+ .owner = THIS_MODULE,
+};
+
+static int aspeed_pwm_tachometer_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np, *child;
+ struct aspeed_pwm_tachometer_data *priv;
+ void __iomem *regs;
+ struct resource *res;
+ struct device *hwmon;
+ struct clk *clk;
+ int ret;
+
+ np = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pwm_channel = default_pwm_params;
+ priv->tacho_channel = default_tacho_params;
+ priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
+ &aspeed_pwm_tachometer_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return -ENODEV;
+ priv->clk_freq = clk_get_rate(clk);
+ priv->clk_tick_ns = NSEC_PER_SEC / priv->clk_freq;
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ dev_err(&pdev->dev, "can't get aspeed_pwm_tacho reset\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ /* reset the PWM/tacho controller */
+ reset_control_assert(priv->reset);
+ reset_control_deassert(priv->reset);
+
+ for_each_child_of_node(np, child) {
+ ret = aspeed_pwm_create_fan(dev, child, priv);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ priv->chip.dev = &pdev->dev;
+ priv->chip.ops = &aspeed_pwm_ops;
+ priv->chip.base = -1;
+ priv->chip.npwm = 16;
+ priv->chip.of_xlate = of_pwm_xlate_with_flags;
+ priv->chip.of_pwm_n_cells = 3;
+
+ ret = pwmchip_add(&priv->chip);
+ if (ret < 0) {
+ dev_err(dev, "failed to register PWM chip\n");
+ return ret;
+ }
+
+ priv->groups[0] = &pwm_dev_group;
+ priv->groups[1] = &fan_dev_group;
+ priv->groups[2] = NULL;
+ hwmon = devm_hwmon_device_register_with_groups(dev,
+ "aspeed_g6_pwm_tacho",
+ priv, priv->groups);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+
+static const struct of_device_id of_pwm_tachometer_match_table[] = {
+ { .compatible = "aspeed,ast2600-pwm-tacho", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pwm_tachometer_match_table);
+
+static struct platform_driver aspeed_pwm_tachometer_driver = {
+ .probe = aspeed_pwm_tachometer_probe,
+ .driver = {
+ .name = "aspeed_g6_pwm_tacho",
+ .of_match_table = of_pwm_tachometer_match_table,
+ },
+};
+
+module_platform_driver(aspeed_pwm_tachometer_driver);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED PWM and Fan Tachometer device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 3cb88d6fbec0..058e01539ed7 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -160,7 +160,7 @@
*/
#define M_TACH_MODE 0x02 /* 10b */
#define M_TACH_UNIT 0x0210
-#define INIT_FAN_CTRL 0xFF
+#define INIT_FAN_CTRL 150 /* 58% */
/* How long we sleep in us while waiting for an RPM result. */
#define ASPEED_RPM_STATUS_SLEEP_USEC 500
diff --git a/drivers/hwmon/peci-cpupower.c b/drivers/hwmon/peci-cpupower.c
new file mode 100644
index 000000000000..f7aa9d463053
--- /dev/null
+++ b/drivers/hwmon/peci-cpupower.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2020 Intel Corporation
+
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "peci-hwmon.h"
+
+enum PECI_CPUPOWER_POWER_SENSOR_TYPES {
+ PECI_CPUPOWER_SENSOR_TYPE_POWER = 0,
+ PECI_CPUPOWER_SENSOR_TYPE_ENERGY,
+ PECI_CPUPOWER_SENSOR_TYPES_COUNT,
+};
+
+#define PECI_CPUPOWER_POWER_CHANNEL_COUNT 1 /* Number of supported channels */
+#define PECI_CPUPOWER_ENERGY_CHANNEL_COUNT 1 /* Number of supported channels */
+
+#define PECI_CPUPOWER_POWER_SENSOR_COUNT 4 /* Number of supported sensors */
+#define PECI_CPUPOWER_ENERGY_SENSOR_COUNT 1 /* Number of supported sensors */
+
+struct peci_cpupower {
+ struct device *dev;
+ struct peci_client_manager *mgr;
+ char name[PECI_NAME_SIZE];
+ u32 power_config[PECI_CPUPOWER_POWER_CHANNEL_COUNT + 1];
+ u32 energy_config[PECI_CPUPOWER_ENERGY_CHANNEL_COUNT + 1];
+
+ struct hwmon_channel_info power_info;
+ struct hwmon_channel_info energy_info;
+ const struct hwmon_channel_info *info[PECI_CPUPOWER_SENSOR_TYPES_COUNT + 1];
+ struct hwmon_chip_info chip;
+
+ struct peci_sensor_data
+ power_sensor_data_list[PECI_CPUPOWER_POWER_CHANNEL_COUNT]
+ [PECI_CPUPOWER_POWER_SENSOR_COUNT];
+ struct peci_sensor_data
+ energy_sensor_data_list[PECI_CPUPOWER_ENERGY_CHANNEL_COUNT]
+ [PECI_CPUPOWER_ENERGY_SENSOR_COUNT];
+
+ /* Below structs are not exposed to any sensor directly */
+ struct peci_sensor_data energy_cache; /* used to limit PECI communication */
+ struct peci_sensor_data power_sensor_prev_energy;
+ struct peci_sensor_data energy_sensor_prev_energy;
+
+ union peci_pkg_power_sku_unit units;
+ bool units_valid;
+
+ u32 ppl1_time_window;
+ u32 ppl2_time_window;
+ bool ppl_time_windows_valid;
+};
+
+static const char *peci_cpupower_labels[PECI_CPUPOWER_SENSOR_TYPES_COUNT] = {
+ "cpu power",
+ "cpu energy",
+};
+
+/**
+ * peci_cpupower_read_cpu_pkg_pwr_info_low - read PCS Platform Power SKU low.
+ * @peci_mgr: PECI client manager handle
+ * @reg: Pointer to the variable where the read value is stored
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static inline int
+peci_cpupower_read_cpu_pkg_pwr_info_low(struct peci_client_manager *peci_mgr,
+ union peci_package_power_info_low *reg)
+{
+ return peci_pcs_read(peci_mgr, PECI_MBX_INDEX_TDP,
+ PECI_PKG_ID_CPU_PACKAGE, &reg->value);
+}
+
+/**
+ * peci_cpupower_read_cpu_pkg_pwr_lim_low - read PCS Package Power Limit Low
+ * @peci_mgr: PECI client manager handle
+ * @reg: Pointer to the variable where the read value is stored
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static inline int
+peci_cpupower_read_cpu_pkg_pwr_lim_low(struct peci_client_manager *peci_mgr,
+ union peci_package_power_limit_low *reg)
+{
+ return peci_pcs_read(peci_mgr, PECI_MBX_INDEX_PKG_POWER_LIMIT1,
+ PECI_PCS_PARAM_ZERO, &reg->value);
+}
+
+static int
+peci_cpupower_get_energy_counter(struct peci_cpupower *priv,
+ struct peci_sensor_data *sensor_data,
+ ulong update_interval)
+{
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ update_interval)) {
+ dev_dbg(priv->dev, "skip reading package energy over peci\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_read(priv->mgr, PECI_MBX_INDEX_ENERGY_COUNTER,
+ PECI_PKG_ID_CPU_PACKAGE, &sensor_data->uvalue);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read package energy\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated(sensor_data);
+
+ dev_dbg(priv->dev,
+ "energy counter updated %duJ, jif %lu, HZ is %d jiffies\n",
+ sensor_data->uvalue, sensor_data->last_updated, HZ);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_cpupower_get_average_power(void *ctx,
+ struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_cpupower *priv = (struct peci_cpupower *)ctx;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev,
+ "skip generating new power value %dmW jif %lu\n",
+ sensor_data->value, jiffies);
+ goto unlock;
+ }
+
+ ret = peci_cpupower_get_energy_counter(priv, &priv->energy_cache,
+ sensor_conf->update_interval);
+ if (ret) {
+ dev_dbg(priv->dev, "cannot update energy counter\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
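+ /*
+ * Average power is derived from the change in the cumulative energy
+ * counter since the previous reading divided by the elapsed time.
+ */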
+ ret = peci_pcs_calc_pwr_from_eng(priv->dev,
+ &priv->power_sensor_prev_energy,
+ &priv->energy_cache,
+ priv->units.bits.eng_unit,
+ &sensor_data->value);
+ if (ret) {
+ dev_dbg(priv->dev, "power calculation failed\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated_with_time(sensor_data,
+ priv->energy_cache.last_updated);
+
+ dev_dbg(priv->dev, "average power %dmW, jif %lu, HZ is %d jiffies\n",
+ sensor_data->value, sensor_data->last_updated, HZ);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_cpupower_get_power_limit(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_cpupower *priv = (struct peci_cpupower *)ctx;
+ union peci_package_power_limit_low power_limit;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev, "skip reading peci, power limit %dmW\n",
+ sensor_data->value);
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_cpupower_read_cpu_pkg_pwr_lim_low(priv->mgr, &power_limit);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read power limit\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated(sensor_data);
+ sensor_data->value = peci_pcs_xn_to_munits(power_limit.bits.pwr_lim_1,
+ priv->units.bits.pwr_unit);
+
+ dev_dbg(priv->dev, "raw power limit %u, unit %u, power limit %d\n",
+ power_limit.bits.pwr_lim_1, priv->units.bits.pwr_unit,
+ sensor_data->value);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_cpupower_set_power_limit(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data,
+ s32 val)
+{
+ struct peci_cpupower *priv = (struct peci_cpupower *)ctx;
+ union peci_package_power_limit_high power_limit_high;
+ union peci_package_power_limit_low power_limit_low;
+ int ret;
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ return ret;
+ }
+
+ ret = peci_cpupower_read_cpu_pkg_pwr_lim_low(priv->mgr,
+ &power_limit_low);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read package power limit 1\n");
+ return ret;
+ }
+
+ ret = peci_pcs_read(priv->mgr, PECI_MBX_INDEX_PKG_POWER_LIMIT2,
+ PECI_PCS_PARAM_ZERO, &power_limit_high.value);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read package power limit 2\n");
+ return ret;
+ }
+
+ /* Calculate PPL time windows if needed */
+ if (!priv->ppl_time_windows_valid) {
+ priv->ppl1_time_window =
+ peci_pcs_calc_plxy_time_window(peci_pcs_munits_to_xn(
+ PECI_PCS_PPL1_TIME_WINDOW,
+ priv->units.bits.tim_unit));
+ priv->ppl2_time_window =
+ peci_pcs_calc_plxy_time_window(peci_pcs_munits_to_xn(
+ PECI_PCS_PPL2_TIME_WINDOW,
+ priv->units.bits.tim_unit));
+ priv->ppl_time_windows_valid = true;
+ }
+
+ /* Enable or disable power limitation */
+ if (val > 0) {
+ /* Set PPL1 */
+ power_limit_low.bits.pwr_lim_1 =
+ peci_pcs_munits_to_xn(val, priv->units.bits.pwr_unit);
+ power_limit_low.bits.pwr_lim_1_en = 1u;
+ power_limit_low.bits.pwr_clmp_lim_1 = 1u;
+ power_limit_low.bits.pwr_lim_1_time = priv->ppl1_time_window;
+
+ /* Set PPL2 */
+ power_limit_high.bits.pwr_lim_2 =
+ peci_pcs_munits_to_xn(PECI_PCS_PPL1_TO_PPL2(val),
+ priv->units.bits.pwr_unit);
+ power_limit_high.bits.pwr_lim_2_en = 1u;
+ power_limit_high.bits.pwr_clmp_lim_2 = 1u;
+ power_limit_high.bits.pwr_lim_2_time = priv->ppl2_time_window;
+ } else {
+ power_limit_low.bits.pwr_lim_1 = 0u;
+ power_limit_low.bits.pwr_lim_1_en = 0u;
+ power_limit_low.bits.pwr_clmp_lim_1 = 0u;
+ power_limit_low.bits.pwr_lim_1_time = 0u;
+ power_limit_high.bits.pwr_lim_2 = 0u;
+ power_limit_high.bits.pwr_lim_2_en = 0u;
+ power_limit_high.bits.pwr_clmp_lim_2 = 0u;
+ power_limit_high.bits.pwr_lim_2_time = 0u;
+ }
+
+ ret = peci_pcs_write(priv->mgr, PECI_MBX_INDEX_PKG_POWER_LIMIT1,
+ PECI_PCS_PARAM_ZERO, power_limit_low.value);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to write package power limit 1\n");
+ return ret;
+ }
+
+ ret = peci_pcs_write(priv->mgr, PECI_MBX_INDEX_PKG_POWER_LIMIT2,
+ PECI_PCS_PARAM_ZERO, power_limit_high.value);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to write package power limit 2\n");
+ return ret;
+ }
+
+ dev_dbg(priv->dev,
+ "power limit %d, unit %u, raw package power limit 1 %u,\n",
+ val, priv->units.bits.pwr_unit, power_limit_low.bits.pwr_lim_1);
+
+ return ret;
+}
+
+static int
+peci_cpupower_read_max_power(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_cpupower *priv = (struct peci_cpupower *)ctx;
+ union peci_package_power_info_low power_info;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev, "skip reading peci, max power %dmW\n",
+ sensor_data->value);
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_cpupower_read_cpu_pkg_pwr_info_low(priv->mgr, &power_info);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read package power info\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated(sensor_data);
+ sensor_data->value = peci_pcs_xn_to_munits(power_info.bits.pkg_tdp,
+ priv->units.bits.pwr_unit);
+
+ dev_dbg(priv->dev, "raw max power %u, unit %u, max power %dmW\n",
+ power_info.bits.pkg_tdp, priv->units.bits.pwr_unit,
+ sensor_data->value);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_cpupower_read_min_power(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_cpupower *priv = (struct peci_cpupower *)ctx;
+ union peci_package_power_info_low power_info;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev, "skip reading peci, min power %dmW\n",
+ sensor_data->value);
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_cpupower_read_cpu_pkg_pwr_info_low(priv->mgr, &power_info);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read package power info\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated(sensor_data);
+ sensor_data->value = peci_pcs_xn_to_munits(power_info.bits.pkg_min_pwr,
+ priv->units.bits.pwr_unit);
+
+ dev_dbg(priv->dev, "raw min power %u, unit %u, min power %dmW\n",
+ power_info.bits.pkg_min_pwr, priv->units.bits.pwr_unit,
+ sensor_data->value);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_cpupower_read_energy(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_cpupower *priv = (struct peci_cpupower *)ctx;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev,
+ "skip generating new energy value %uuJ jif %lu\n",
+ sensor_data->uvalue, jiffies);
+ goto unlock;
+ }
+
+ ret = peci_cpupower_get_energy_counter(priv, &priv->energy_cache,
+ sensor_conf->update_interval);
+ if (ret) {
+ dev_dbg(priv->dev, "cannot update energy counter\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_calc_acc_eng(priv->dev,
+ &priv->energy_sensor_prev_energy,
+ &priv->energy_cache,
+ priv->units.bits.eng_unit,
+ &sensor_data->uvalue);
+
+ if (ret) {
+ dev_dbg(priv->dev, "cumulative energy calculation failed\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated_with_time(sensor_data,
+ priv->energy_cache.last_updated);
+
+ dev_dbg(priv->dev, "energy %duJ, jif %lu, HZ is %d jiffies\n",
+ sensor_data->uvalue, sensor_data->last_updated, HZ);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static struct peci_sensor_conf
+peci_cpupower_power_cfg[PECI_CPUPOWER_POWER_CHANNEL_COUNT]
+ [PECI_CPUPOWER_POWER_SENSOR_COUNT] = {
+ /* Channel 0 - Power */
+ {
+ {
+ .attribute = hwmon_power_average,
+ .config = HWMON_P_AVERAGE,
+ .update_interval = UPDATE_INTERVAL_100MS,
+ .read = peci_cpupower_get_average_power,
+ .write = NULL,
+ },
+ {
+ .attribute = hwmon_power_cap,
+ .config = HWMON_P_CAP,
+ .update_interval = UPDATE_INTERVAL_100MS,
+ .read = peci_cpupower_get_power_limit,
+ .write = peci_cpupower_set_power_limit,
+ },
+ {
+ .attribute = hwmon_power_cap_max,
+ .config = HWMON_P_CAP_MAX,
+ .update_interval = UPDATE_INTERVAL_10S,
+ .read = peci_cpupower_read_max_power,
+ .write = NULL,
+ },
+ {
+ .attribute = hwmon_power_cap_min,
+ .config = HWMON_P_CAP_MIN,
+ .update_interval = UPDATE_INTERVAL_10S,
+ .read = peci_cpupower_read_min_power,
+ .write = NULL,
+ },
+ },
+};
+
+static struct peci_sensor_conf
+peci_cpupower_energy_cfg[PECI_CPUPOWER_ENERGY_CHANNEL_COUNT]
+ [PECI_CPUPOWER_ENERGY_SENSOR_COUNT] = {
+ /* Channel 0 - Energy */
+ {
+ {
+ .attribute = hwmon_energy_input,
+ .config = HWMON_E_INPUT,
+ .update_interval = UPDATE_INTERVAL_100MS,
+ .read = peci_cpupower_read_energy,
+ .write = NULL,
+ },
+ }
+};
+
+static bool
+peci_cpupower_is_channel_valid(enum hwmon_sensor_types type,
+ int channel)
+{
+ if ((type == hwmon_power && channel < PECI_CPUPOWER_POWER_CHANNEL_COUNT) ||
+ (type == hwmon_energy && channel < PECI_CPUPOWER_ENERGY_CHANNEL_COUNT))
+ return true;
+
+ return false;
+}
+
+static int
+peci_cpupower_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ if (!peci_cpupower_is_channel_valid(type, channel))
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_power_label:
+ *str = peci_cpupower_labels[PECI_CPUPOWER_SENSOR_TYPE_POWER];
+ break;
+ case hwmon_energy_label:
+ *str = peci_cpupower_labels[PECI_CPUPOWER_SENSOR_TYPE_ENERGY];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+peci_cpupower_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_cpupower *priv = dev_get_drvdata(dev);
+ struct peci_sensor_conf *sensor_conf;
+ struct peci_sensor_data *sensor_data;
+ int ret;
+
+ if (!priv || !val)
+ return -EINVAL;
+
+ if (!peci_cpupower_is_channel_valid(type, channel))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case hwmon_power:
+ ret = peci_sensor_get_ctx(attr, peci_cpupower_power_cfg[channel],
+ &sensor_conf,
+ priv->power_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_cpupower_power_cfg[channel]));
+ break;
+ case hwmon_energy:
+ ret = peci_sensor_get_ctx(attr, peci_cpupower_energy_cfg[channel],
+ &sensor_conf,
+ priv->energy_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_cpupower_energy_cfg[channel]));
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ if (sensor_conf->read) {
+ ret = sensor_conf->read(priv, sensor_conf, sensor_data);
+ if (!ret)
+ *val = (long)sensor_data->value;
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int
+peci_cpupower_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct peci_cpupower *priv = dev_get_drvdata(dev);
+ struct peci_sensor_conf *sensor_conf;
+ struct peci_sensor_data *sensor_data;
+ int ret;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!peci_cpupower_is_channel_valid(type, channel))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case hwmon_power:
+ ret = peci_sensor_get_ctx(attr, peci_cpupower_power_cfg[channel],
+ &sensor_conf,
+ priv->power_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_cpupower_power_cfg[channel]));
+ break;
+ case hwmon_energy:
+ ret = peci_sensor_get_ctx(attr, peci_cpupower_energy_cfg[channel],
+ &sensor_conf,
+ priv->energy_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_cpupower_energy_cfg[channel]));
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ if (sensor_conf->write) {
+ ret = sensor_conf->write(priv, sensor_conf, sensor_data,
+ (s32)val);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static umode_t
+peci_cpupower_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ struct peci_sensor_conf *sensor_conf;
+ umode_t mode = 0;
+ int ret;
+
+ if (!peci_cpupower_is_channel_valid(type, channel))
+ return mode;
+
+ if (attr == hwmon_power_label || attr == hwmon_energy_label)
+ return 0444;
+
+ switch (type) {
+ case hwmon_power:
+ ret = peci_sensor_get_ctx(attr, peci_cpupower_power_cfg[channel],
+ &sensor_conf, NULL, NULL,
+ ARRAY_SIZE(peci_cpupower_power_cfg[channel]));
+ break;
+ case hwmon_energy:
+ ret = peci_sensor_get_ctx(attr, peci_cpupower_energy_cfg[channel],
+ &sensor_conf, NULL, NULL,
+ ARRAY_SIZE(peci_cpupower_energy_cfg[channel]));
+ break;
+ default:
+ return mode;
+ }
+
+ if (!ret) {
+ if (sensor_conf->read)
+ mode |= 0444;
+ if (sensor_conf->write)
+ mode |= 0200;
+ }
+
+ return mode;
+}
+
+static const struct hwmon_ops peci_cpupower_ops = {
+ .is_visible = peci_cpupower_is_visible,
+ .read_string = peci_cpupower_read_string,
+ .read = peci_cpupower_read,
+ .write = peci_cpupower_write,
+};
+
+static void peci_cpupower_sensor_init(struct peci_cpupower *priv)
+{
+ int i, j;
+
+ mutex_init(&priv->energy_cache.lock);
+
+ for (i = 0; i < PECI_CPUPOWER_POWER_CHANNEL_COUNT; i++) {
+ for (j = 0; j < PECI_CPUPOWER_POWER_SENSOR_COUNT; j++)
+ mutex_init(&priv->power_sensor_data_list[i][j].lock);
+ }
+
+ for (i = 0; i < PECI_CPUPOWER_ENERGY_CHANNEL_COUNT; i++) {
+ for (j = 0; j < PECI_CPUPOWER_ENERGY_SENSOR_COUNT; j++)
+ mutex_init(&priv->energy_sensor_data_list[i][j].lock);
+ }
+}
+
+static int peci_cpupower_probe(struct platform_device *pdev)
+{
+ struct peci_client_manager *mgr = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct peci_cpupower *priv;
+ struct device *hwmon_dev;
+ u32 power_cfg_idx = 0;
+ u32 energy_cfg_idx = 0;
+ u32 cmd_mask;
+
+ cmd_mask = BIT(PECI_CMD_RD_PKG_CFG) | BIT(PECI_CMD_WR_PKG_CFG);
+ if ((mgr->client->adapter->cmd_mask & cmd_mask) != cmd_mask)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->mgr = mgr;
+ priv->dev = dev;
+
+ snprintf(priv->name, PECI_NAME_SIZE, "peci_cpupower.cpu%d",
+ mgr->client->addr - PECI_BASE_ADDR);
+
+ priv->power_config[power_cfg_idx] = HWMON_P_LABEL |
+ peci_sensor_get_config(peci_cpupower_power_cfg[power_cfg_idx],
+ ARRAY_SIZE(peci_cpupower_power_cfg[power_cfg_idx]));
+
+ priv->energy_config[energy_cfg_idx] = HWMON_E_LABEL |
+ peci_sensor_get_config(peci_cpupower_energy_cfg[energy_cfg_idx],
+ ARRAY_SIZE(peci_cpupower_energy_cfg[energy_cfg_idx]));
+
+ priv->info[PECI_CPUPOWER_SENSOR_TYPE_POWER] = &priv->power_info;
+ priv->power_info.type = hwmon_power;
+ priv->power_info.config = priv->power_config;
+
+ priv->info[PECI_CPUPOWER_SENSOR_TYPE_ENERGY] = &priv->energy_info;
+ priv->energy_info.type = hwmon_energy;
+ priv->energy_info.config = priv->energy_config;
+
+ priv->chip.ops = &peci_cpupower_ops;
+ priv->chip.info = priv->info;
+
+ peci_cpupower_sensor_init(priv);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(priv->dev, priv->name,
+ priv, &priv->chip,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), priv->name);
+
+ return 0;
+}
+
+static const struct platform_device_id peci_cpupower_ids[] = {
+ { .name = "peci-cpupower", .driver_data = 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, peci_cpupower_ids);
+
+static struct platform_driver peci_cpupower_driver = {
+ .probe = peci_cpupower_probe,
+ .id_table = peci_cpupower_ids,
+ .driver = { .name = KBUILD_MODNAME, },
+};
+module_platform_driver(peci_cpupower_driver);
+
+MODULE_AUTHOR("Zhikui Ren <zhikui.ren@intel.com>");
+MODULE_DESCRIPTION("PECI cpupower driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/peci-cputemp.c b/drivers/hwmon/peci-cputemp.c
new file mode 100644
index 000000000000..a4a7f8cc0108
--- /dev/null
+++ b/drivers/hwmon/peci-cputemp.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "peci-hwmon.h"
+
+#define DEFAULT_CHANNEL_NUMS 5
+#define MODTEMP_CHANNEL_NUMS CORE_MASK_BITS_MAX
+#define CPUTEMP_CHANNEL_NUMS (DEFAULT_CHANNEL_NUMS + MODTEMP_CHANNEL_NUMS)
+#define BIOS_RST_CPL3 BIT(3)
+
+struct temp_group {
+ struct peci_sensor_data die;
+ struct peci_sensor_data dts;
+ struct peci_sensor_data tcontrol;
+ struct peci_sensor_data tthrottle;
+ struct peci_sensor_data tjmax;
+ struct peci_sensor_data module[MODTEMP_CHANNEL_NUMS];
+};
+
+struct peci_cputemp {
+ struct peci_client_manager *mgr;
+ struct device *dev;
+ char name[PECI_NAME_SIZE];
+ const struct cpu_gen_info *gen_info;
+ struct temp_group temp;
+ u64 core_mask;
+ u32 temp_config[CPUTEMP_CHANNEL_NUMS + 1];
+ uint config_idx;
+ struct hwmon_channel_info temp_info;
+ const struct hwmon_channel_info *info[2];
+ struct hwmon_chip_info chip;
+ char **module_temp_label;
+};
+
+enum cputemp_channels {
+ channel_die,
+ channel_dts,
+ channel_tcontrol,
+ channel_tthrottle,
+ channel_tjmax,
+ channel_core,
+};
+
+static const u32 config_table[] = {
+ /* Die temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+
+ /* DTS margin */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_CRIT_HYST,
+
+ /* Tcontrol temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_CRIT,
+
+ /* Tthrottle temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT,
+
+ /* Tjmax temperature */
+ HWMON_T_LABEL | HWMON_T_INPUT,
+
+ /* Core temperature - for all core channels */
+ HWMON_T_LABEL | HWMON_T_INPUT,
+};
+
+static const char *cputemp_label[DEFAULT_CHANNEL_NUMS] = {
+ "Die",
+ "DTS",
+ "Tcontrol",
+ "Tthrottle",
+ "Tjmax"
+};
+
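+/*
+ * Convert a 10.6 fixed-point DTS value (10-bit signed integer part,
+ * 6-bit fraction) to millidegrees: the XOR/subtract pair sign-extends
+ * the 16-bit value, * 1000 scales to millidegrees and / 64 removes the
+ * fractional bits.
+ */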
+static s32 ten_dot_six_to_millidegree(s32 val)
+{
+ return ((val ^ 0x8000) - 0x8000) * 1000 / 64;
+}
+
+static int get_temp_targets(struct peci_cputemp *priv)
+{
+ struct peci_rd_end_pt_cfg_msg re_msg;
+ u32 bios_reset_cpl_cfg;
+ s32 tthrottle_offset;
+ s32 tcontrol_margin;
+ u8 pkg_cfg[4];
+ int ret;
+
+ /*
+ * Use only the tcontrol update marker to decide whether the target
+ * values need refreshing.
+ */
+ if (!peci_sensor_need_update(&priv->temp.tcontrol))
+ return 0;
+
+ /*
+ * The CPU can return invalid temperatures before the BIOS-PCU handshake
+ * (RST_CPL3) completes, so filter those readings out.
+ */
+ switch (priv->gen_info->model) {
+ case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_ICELAKE_XD:
+ re_msg.addr = priv->mgr->client->addr;
+ re_msg.msg_type = PECI_ENDPTCFG_TYPE_LOCAL_PCI;
+ re_msg.params.pci_cfg.seg = 0;
+ re_msg.params.pci_cfg.bus = 31;
+ re_msg.params.pci_cfg.device = 30;
+ re_msg.params.pci_cfg.function = 1;
+ re_msg.params.pci_cfg.reg = 0x94;
+ re_msg.rx_len = 4;
+ re_msg.domain_id = 0;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_END_PT_CFG, sizeof(re_msg), &re_msg);
+ if (ret || re_msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ bios_reset_cpl_cfg = le32_to_cpup((__le32 *)re_msg.data);
+ if (!(bios_reset_cpl_cfg & BIOS_RST_CPL3)) {
+ dev_dbg(priv->dev, "BIOS and Pcode Node ID isn't configured, BIOS_RESET_CPL_CFG: 0x%x\n",
+ bios_reset_cpl_cfg);
+ return -EAGAIN;
+ }
+
+ break;
+ default:
+ /* TODO: Check reset completion for other CPUs if needed */
+ break;
+ }
+
+ ret = peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_TEMP_TARGET, 0,
+ pkg_cfg);
+ if (ret)
+ return ret;
+
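+ /*
+ * TEMP_TARGET layout as used below: byte 2 holds Tjmax in degrees C,
+ * byte 1 the Tcontrol margin as a signed offset below Tjmax, and
+ * byte 3 the Tthrottle offset; all are converted to millidegrees.
+ */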
+ priv->temp.tjmax.value = pkg_cfg[2] * 1000;
+
+ tcontrol_margin = pkg_cfg[1];
+ tcontrol_margin = ((tcontrol_margin ^ 0x80) - 0x80) * 1000;
+ priv->temp.tcontrol.value = priv->temp.tjmax.value - tcontrol_margin;
+
+ tthrottle_offset = (pkg_cfg[3] & 0x2f) * 1000;
+ priv->temp.tthrottle.value = priv->temp.tjmax.value - tthrottle_offset;
+
+ peci_sensor_mark_updated(&priv->temp.tcontrol);
+
+ return 0;
+}
+
+static int get_die_temp(struct peci_cputemp *priv)
+{
+ struct peci_get_temp_msg msg;
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp.die))
+ return 0;
+
+ msg.addr = priv->mgr->client->addr;
+
+ ret = peci_command(priv->mgr->client->adapter, PECI_CMD_GET_TEMP, sizeof(msg), &msg);
+ if (ret)
+ return ret;
+
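+ /*
+ * The raw GetTemp() reading is an offset from Tjmax in 1/64 degree C
+ * units (negative below Tjmax).
+ */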
+ /* Note that the tjmax should be available before calling it */
+ priv->temp.die.value = priv->temp.tjmax.value +
+ (msg.temp_raw * 1000 / 64);
+
+ peci_sensor_mark_updated(&priv->temp.die);
+
+ return 0;
+}
+
+static int get_dts(struct peci_cputemp *priv)
+{
+ s32 dts_margin;
+ u8 pkg_cfg[4];
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp.dts))
+ return 0;
+
+ ret = peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_DTS_MARGIN, 0,
+ pkg_cfg);
+
+ if (ret)
+ return ret;
+
+ dts_margin = le16_to_cpup((__le16 *)pkg_cfg);
+
+ /**
+ * Processors return a value of DTS reading in 10.6 format
+ * (10 bits signed decimal, 6 bits fractional).
+ * Error codes:
+ * 0x8000: General sensor error
+ * 0x8001: Reserved
+ * 0x8002: Underflow on reading value
+ * 0x8003-0x81ff: Reserved
+ */
+ if (dts_margin >= 0x8000 && dts_margin <= 0x81ff)
+ return -EIO;
+
+ dts_margin = ten_dot_six_to_millidegree(dts_margin);
+
+ /* Note that tcontrol must already be available at this point */
+ priv->temp.dts.value = priv->temp.tcontrol.value - dts_margin;
+
+ peci_sensor_mark_updated(&priv->temp.dts);
+
+ return 0;
+}
+
+static int get_module_temp(struct peci_cputemp *priv, int index)
+{
+ s32 module_dts_margin;
+ u8 pkg_cfg[4];
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp.module[index]))
+ return 0;
+
+ ret = peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_MODULE_TEMP,
+ index, pkg_cfg);
+ if (ret)
+ return ret;
+
+ module_dts_margin = le16_to_cpup((__le16 *)pkg_cfg);
+
+ /*
+ * Processors return a value of the DTS reading in 10.6 format
+ * (10 bits signed decimal, 6 bits fractional).
+ * Error codes:
+ * 0x8000: General sensor error
+ * 0x8001: Reserved
+ * 0x8002: Underflow on reading value
+ * 0x8003-0x81ff: Reserved
+ */
+ if (module_dts_margin >= 0x8000 && module_dts_margin <= 0x81ff)
+ return -EIO;
+
+ module_dts_margin = ten_dot_six_to_millidegree(module_dts_margin);
+
+ /* Note that tjmax must already be available at this point */
+ priv->temp.module[index].value = priv->temp.tjmax.value +
+ module_dts_margin;
+
+ peci_sensor_mark_updated(&priv->temp.module[index]);
+
+ return 0;
+}
+
+static int cputemp_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct peci_cputemp *priv = dev_get_drvdata(dev);
+
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = (channel < DEFAULT_CHANNEL_NUMS) ?
+ cputemp_label[channel] :
+ (const char *)priv->module_temp_label[channel -
+ DEFAULT_CHANNEL_NUMS];
+
+ return 0;
+}
+
+static int cputemp_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_cputemp *priv = dev_get_drvdata(dev);
+ int ret, module_index;
+
+ if (channel >= CPUTEMP_CHANNEL_NUMS ||
+ !(priv->temp_config[channel] & BIT(attr)))
+ return -EOPNOTSUPP;
+
+ ret = get_temp_targets(priv);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ switch (channel) {
+ case channel_die:
+ ret = get_die_temp(priv);
+ if (ret)
+ break;
+
+ *val = priv->temp.die.value;
+ break;
+ case channel_dts:
+ ret = get_dts(priv);
+ if (ret)
+ break;
+
+ *val = priv->temp.dts.value;
+ break;
+ case channel_tcontrol:
+ *val = priv->temp.tcontrol.value;
+ break;
+ case channel_tthrottle:
+ *val = priv->temp.tthrottle.value;
+ break;
+ case channel_tjmax:
+ *val = priv->temp.tjmax.value;
+ break;
+ default:
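+ /* Channels beyond the fixed package-level ones map to per-core sensors */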
+ module_index = channel - DEFAULT_CHANNEL_NUMS;
+ ret = get_module_temp(priv, module_index);
+ if (ret)
+ break;
+
+ *val = priv->temp.module[module_index].value;
+ break;
+ }
+ break;
+ case hwmon_temp_max:
+ *val = priv->temp.tcontrol.value;
+ break;
+ case hwmon_temp_crit:
+ *val = priv->temp.tjmax.value;
+ break;
+ case hwmon_temp_crit_hyst:
+ *val = priv->temp.tjmax.value - priv->temp.tcontrol.value;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static umode_t cputemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct peci_cputemp *priv = data;
+
+ if (channel < ARRAY_SIZE(priv->temp_config) &&
+ (priv->temp_config[channel] & BIT(attr)) &&
+ (channel < DEFAULT_CHANNEL_NUMS ||
+ (channel >= DEFAULT_CHANNEL_NUMS &&
+ (priv->core_mask & BIT(channel - DEFAULT_CHANNEL_NUMS)))))
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops cputemp_ops = {
+ .is_visible = cputemp_is_visible,
+ .read_string = cputemp_read_string,
+ .read = cputemp_read,
+};
+
+static int check_resolved_cores(struct peci_cputemp *priv)
+{
+ struct peci_rd_pci_cfg_local_msg msg;
+ int ret;
+
+ /* Get the RESOLVED_CORES register value */
+ switch (priv->gen_info->model) {
+ case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_ICELAKE_XD:
+ msg.addr = priv->mgr->client->addr;
+ msg.device = 30;
+ msg.function = 3;
+ msg.bus = 14;
+ msg.reg = 0xd4;
+ msg.rx_len = 4;
+ msg.domain_id = 0;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, sizeof(msg), &msg);
+ if (msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
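+ /*
+ * RESOLVED_CORES is a 64-bit mask split across two 32-bit registers:
+ * 0xd4 holds the upper half and 0xd0 the lower half.
+ */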
+ priv->core_mask = le32_to_cpup((__le32 *)msg.pci_config);
+ priv->core_mask <<= 32;
+
+ msg.reg = 0xd0;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, sizeof(msg), &msg);
+
+ if (msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret) {
+ priv->core_mask = 0;
+ return ret;
+ }
+
+ priv->core_mask |= le32_to_cpup((__le32 *)msg.pci_config);
+ break;
+ default:
+ msg.addr = priv->mgr->client->addr;
+ msg.device = 30;
+ msg.function = 3;
+ msg.bus = 1;
+ msg.reg = 0xb4;
+ msg.rx_len = 4;
+ msg.domain_id = 0;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, sizeof(msg), &msg);
+ if (msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ priv->core_mask = le32_to_cpup((__le32 *)msg.pci_config);
+ break;
+ }
+
+ if (!priv->core_mask)
+ return -EAGAIN;
+
+ dev_dbg(priv->dev, "Scanned resolved cores: 0x%llx\n", priv->core_mask);
+
+ return 0;
+}
+
+static int create_module_temp_label(struct peci_cputemp *priv, int idx)
+{
+ priv->module_temp_label[idx] = devm_kzalloc(priv->dev,
+ PECI_HWMON_LABEL_STR_LEN,
+ GFP_KERNEL);
+ if (!priv->module_temp_label[idx])
+ return -ENOMEM;
+
+ snprintf(priv->module_temp_label[idx], PECI_HWMON_LABEL_STR_LEN, "Core %d", idx);
+
+ return 0;
+}
+
+static int create_module_temp_info(struct peci_cputemp *priv)
+{
+ int ret, i;
+
+ ret = check_resolved_cores(priv);
+ if (ret)
+ return ret;
+
+ priv->module_temp_label = devm_kzalloc(priv->dev,
+ MODTEMP_CHANNEL_NUMS *
+ sizeof(char *),
+ GFP_KERNEL);
+ if (!priv->module_temp_label)
+ return -ENOMEM;
+
+ for (i = 0; i < MODTEMP_CHANNEL_NUMS; i++) {
+ priv->temp_config[priv->config_idx++] = config_table[channel_core];
+
+ if (i < priv->gen_info->core_mask_bits && priv->core_mask & BIT(i)) {
+ ret = create_module_temp_label(priv, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int peci_cputemp_probe(struct platform_device *pdev)
+{
+ struct peci_client_manager *mgr = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct peci_cputemp *priv;
+ struct device *hwmon_dev;
+ int ret;
+
+ if ((mgr->client->adapter->cmd_mask &
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG))) !=
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG)))
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->mgr = mgr;
+ priv->dev = dev;
+ priv->gen_info = mgr->gen_info;
+
+ snprintf(priv->name, PECI_NAME_SIZE, "peci_cputemp.cpu%d",
+ mgr->client->addr - PECI_BASE_ADDR);
+
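+ /* The first five channels are the fixed package-level sensors; core channels follow */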
+ priv->temp_config[priv->config_idx++] = config_table[channel_die];
+ priv->temp_config[priv->config_idx++] = config_table[channel_dts];
+ priv->temp_config[priv->config_idx++] = config_table[channel_tcontrol];
+ priv->temp_config[priv->config_idx++] = config_table[channel_tthrottle];
+ priv->temp_config[priv->config_idx++] = config_table[channel_tjmax];
+
+ ret = create_module_temp_info(priv);
+ if (ret)
+ dev_dbg(dev, "Skipped creating core temp info\n");
+
+ priv->chip.ops = &cputemp_ops;
+ priv->chip.info = priv->info;
+
+ priv->info[0] = &priv->temp_info;
+
+ priv->temp_info.type = hwmon_temp;
+ priv->temp_info.config = priv->temp_config;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(priv->dev,
+ priv->name,
+ priv,
+ &priv->chip,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), priv->name);
+
+ return 0;
+}
+
+static const struct platform_device_id peci_cputemp_ids[] = {
+ { .name = "peci-cputemp", .driver_data = 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, peci_cputemp_ids);
+
+static struct platform_driver peci_cputemp_driver = {
+ .probe = peci_cputemp_probe,
+ .id_table = peci_cputemp_ids,
+ .driver = { .name = KBUILD_MODNAME, },
+};
+module_platform_driver(peci_cputemp_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI cputemp driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/peci-dimmpower.c b/drivers/hwmon/peci-dimmpower.c
new file mode 100644
index 000000000000..78a1570b6650
--- /dev/null
+++ b/drivers/hwmon/peci-dimmpower.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "peci-hwmon.h"
+
+enum PECI_DIMMPOWER_SENSOR_TYPES {
+ PECI_DIMMPOWER_SENSOR_TYPE_POWER = 0,
+ PECI_DIMMPOWER_SENSOR_TYPE_ENERGY,
+ PECI_DIMMPOWER_SENSOR_TYPES_COUNT,
+};
+
+#define PECI_DIMMPOWER_POWER_CHANNEL_COUNT 1 /* Number of supported channels */
+#define PECI_DIMMPOWER_ENERGY_CHANNEL_COUNT 1 /* Number of supported channels */
+
+#define PECI_DIMMPOWER_POWER_SENSOR_COUNT 4 /* Number of supported sensors/readings */
+#define PECI_DIMMPOWER_ENERGY_SENSOR_COUNT 1 /* Number of supported sensors/readings */
+
+struct peci_dimmpower {
+ struct device *dev;
+ struct peci_client_manager *mgr;
+ char name[PECI_NAME_SIZE];
+ u32 power_config[PECI_DIMMPOWER_POWER_CHANNEL_COUNT + 1];
+ u32 energy_config[PECI_DIMMPOWER_ENERGY_CHANNEL_COUNT + 1];
+
+ struct hwmon_channel_info power_info;
+ struct hwmon_channel_info energy_info;
+ const struct hwmon_channel_info *info[PECI_DIMMPOWER_SENSOR_TYPES_COUNT + 1];
+ struct hwmon_chip_info chip;
+
+ struct peci_sensor_data
+ power_sensor_data_list[PECI_DIMMPOWER_POWER_CHANNEL_COUNT]
+ [PECI_DIMMPOWER_POWER_SENSOR_COUNT];
+ struct peci_sensor_data
+ energy_sensor_data_list[PECI_DIMMPOWER_ENERGY_CHANNEL_COUNT]
+ [PECI_DIMMPOWER_ENERGY_SENSOR_COUNT];
+
+ /* Below structs are not exposed to any sensor directly */
+ struct peci_sensor_data energy_cache; /* used to limit PECI communication */
+ struct peci_sensor_data power_sensor_prev_energy;
+ struct peci_sensor_data energy_sensor_prev_energy;
+
+ union peci_pkg_power_sku_unit units;
+ bool units_valid;
+
+ u32 dpl_time_window;
+ bool dpl_time_window_valid;
+};
+
+static const char *peci_dimmpower_labels[PECI_DIMMPOWER_SENSOR_TYPES_COUNT] = {
+ "dimm power",
+ "dimm energy",
+};
+
+/**
+ * peci_dimmpower_read_dram_power_limit - read PCS DRAM Power Limit
+ * @peci_mgr: PECI client manager handle
+ * @reg: Pointer to the variable the read value is going to be put in
+ *
+ * Return: 0 if succeeded, other values in case of an error.
+ */
+static inline int
+peci_dimmpower_read_dram_power_limit(struct peci_client_manager *peci_mgr,
+ union peci_dram_power_limit *reg)
+{
+ return peci_pcs_read(peci_mgr, PECI_MBX_INDEX_DDR_RAPL_PL1,
+ PECI_PCS_PARAM_ZERO, &reg->value);
+}
+
+static int
+peci_dimmpower_get_energy_counter(struct peci_dimmpower *priv,
+ struct peci_sensor_data *sensor_data,
+ ulong update_interval)
+{
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ update_interval)) {
+ dev_dbg(priv->dev, "skip reading dimm energy over peci\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_read(priv->mgr, PECI_MBX_INDEX_ENERGY_STATUS,
+ PECI_PKG_ID_DIMM, &sensor_data->uvalue);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read dimm energy\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated(sensor_data);
+
+ dev_dbg(priv->dev,
+ "energy counter updated %duJ, jif %lu, HZ is %d jiffies\n",
+ sensor_data->uvalue, sensor_data->last_updated, HZ);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_dimmpower_get_avg_power(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_dimmpower *priv = (struct peci_dimmpower *)ctx;
+ int ret = 0;
+
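+ /*
+ * The average power is derived from the change in the DIMM energy
+ * counter between two reads divided by the elapsed time; see
+ * peci_pcs_calc_pwr_from_eng().
+ */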
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev, "skip reading peci, average power %dmW jif %lu\n",
+ sensor_data->value, jiffies);
+ goto unlock;
+ }
+
+ ret = peci_dimmpower_get_energy_counter(priv, &priv->energy_cache,
+ sensor_conf->update_interval);
+ if (ret) {
+ dev_dbg(priv->dev, "cannot update energy counter\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_calc_pwr_from_eng(priv->dev,
+ &priv->power_sensor_prev_energy,
+ &priv->energy_cache,
+ priv->units.bits.eng_unit,
+ &sensor_data->value);
+ if (ret) {
+ dev_dbg(priv->dev, "power calculation failed\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated_with_time(sensor_data, priv->energy_cache.last_updated);
+
+ dev_dbg(priv->dev, "average power %dmW, jif %lu, HZ is %d jiffies\n",
+ sensor_data->value, sensor_data->last_updated, HZ);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_dimmpower_get_power_limit(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_dimmpower *priv = (struct peci_dimmpower *)ctx;
+ union peci_dram_power_limit power_limit;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev, "skip reading peci, power limit %dmW\n",
+ sensor_data->value);
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_dimmpower_read_dram_power_limit(priv->mgr, &power_limit);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read power limit\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated(sensor_data);
+ sensor_data->value = peci_pcs_xn_to_munits(power_limit.bits.pp_pwr_lim,
+ priv->units.bits.pwr_unit);
+
+ dev_dbg(priv->dev, "raw power limit %u, unit %u, power limit %d\n",
+ power_limit.bits.pp_pwr_lim, priv->units.bits.pwr_unit,
+ sensor_data->value);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_dimmpower_set_power_limit(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data,
+ s32 val)
+{
+ struct peci_dimmpower *priv = (struct peci_dimmpower *)ctx;
+ union peci_dram_power_limit power_limit;
+ int ret;
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ return ret;
+ }
+
+ ret = peci_dimmpower_read_dram_power_limit(priv->mgr, &power_limit);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read power limit\n");
+ return ret;
+ }
+
+ /* Calculate DPL time window if needed */
+ if (!priv->dpl_time_window_valid) {
+ priv->dpl_time_window =
+ peci_pcs_calc_plxy_time_window(peci_pcs_munits_to_xn(
+ PECI_PCS_PPL1_TIME_WINDOW,
+ priv->units.bits.tim_unit));
+ priv->dpl_time_window_valid = true;
+ }
+
+ /* Enable or disable power limitation */
+ if (val > 0) {
+ power_limit.bits.pp_pwr_lim =
+ peci_pcs_munits_to_xn(val, priv->units.bits.pwr_unit);
+ power_limit.bits.pwr_lim_ctrl_en = 1u;
+ power_limit.bits.ctrl_time_win = priv->dpl_time_window;
+ } else {
+ power_limit.bits.pp_pwr_lim = 0u;
+ power_limit.bits.pwr_lim_ctrl_en = 0u;
+ power_limit.bits.ctrl_time_win = 0u;
+ }
+
+ ret = peci_pcs_write(priv->mgr, PECI_MBX_INDEX_DDR_RAPL_PL1,
+ PECI_PCS_PARAM_ZERO, power_limit.value);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to write power limit\n");
+ return ret;
+ }
+
+ dev_dbg(priv->dev, "power limit %d, unit %u, raw power limit %u\n",
+ val, priv->units.bits.pwr_unit, power_limit.bits.pp_pwr_lim);
+
+ return ret;
+}
+
+static int
+peci_dimmpower_read_max_power(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_dimmpower *priv = (struct peci_dimmpower *)ctx;
+ union peci_dram_power_info_low power_info;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev, "skip reading peci, max power %dmW\n",
+ sensor_data->value);
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_read(priv->mgr, PECI_MBX_INDEX_DDR_PWR_INFO_LOW,
+ PECI_PCS_PARAM_ZERO, &power_info.value);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read power info\n");
+ goto unlock;
+ }
+
+ peci_sensor_mark_updated(sensor_data);
+ sensor_data->value = peci_pcs_xn_to_munits(power_info.bits.tdp,
+ priv->units.bits.pwr_unit);
+
+ dev_dbg(priv->dev, "raw max power %u, unit %u, max power %dmW\n",
+ power_info.bits.tdp, priv->units.bits.pwr_unit,
+ sensor_data->value);
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static int
+peci_dimmpower_read_min_power(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_dimmpower *priv = (struct peci_dimmpower *)ctx;
+
+ /*
+ * DRAM_POWER_INFO.DRAM_MIN_PWR is no longer supported on CPUs starting
+ * from SPR, so BIOS does not update it and the register still holds the
+ * default value (15W), which does not make sense: MAX_PWR/TDP can be
+ * smaller than 15W. Report 0 as a reasonable value for this parameter.
+ */
+ sensor_data->value = 0;
+ dev_dbg(priv->dev, "min power %dmW\n", sensor_data->value);
+ return 0;
+}
+
+static int
+peci_dimmpower_read_energy(void *ctx, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data)
+{
+ struct peci_dimmpower *priv = (struct peci_dimmpower *)ctx;
+ int ret = 0;
+
+ mutex_lock(&sensor_data->lock);
+ if (!peci_sensor_need_update_with_time(sensor_data,
+ sensor_conf->update_interval)) {
+ dev_dbg(priv->dev,
+ "skip generating new energy value %duJ jif %lu\n",
+ sensor_data->uvalue, jiffies);
+ goto unlock;
+ }
+
+ ret = peci_dimmpower_get_energy_counter(priv, &priv->energy_cache,
+ sensor_conf->update_interval);
+ if (ret) {
+ dev_dbg(priv->dev, "cannot update energy counter\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_get_units(priv->mgr, &priv->units, &priv->units_valid);
+ if (ret) {
+ dev_dbg(priv->dev, "not able to read units\n");
+ goto unlock;
+ }
+
+ ret = peci_pcs_calc_acc_eng(priv->dev,
+ &priv->energy_sensor_prev_energy,
+ &priv->energy_cache,
+ priv->units.bits.eng_unit,
+ &sensor_data->uvalue);
+
+ if (ret) {
+ dev_dbg(priv->dev, "cumulative energy calculation failed\n");
+ goto unlock;
+ }
+ peci_sensor_mark_updated_with_time(sensor_data,
+ priv->energy_cache.last_updated);
+
+ dev_dbg(priv->dev, "energy %duJ, jif %lu, HZ is %d jiffies\n",
+ sensor_data->uvalue, sensor_data->last_updated, HZ);
+
+unlock:
+ mutex_unlock(&sensor_data->lock);
+ return ret;
+}
+
+static struct peci_sensor_conf
+peci_dimmpower_power_cfg[PECI_DIMMPOWER_POWER_CHANNEL_COUNT]
+ [PECI_DIMMPOWER_POWER_SENSOR_COUNT] = {
+ /* Channel 0 - Power */
+ {
+ {
+ .attribute = hwmon_power_average,
+ .config = HWMON_P_AVERAGE,
+ .update_interval = UPDATE_INTERVAL_100MS,
+ .read = peci_dimmpower_get_avg_power,
+ .write = NULL,
+ },
+ {
+ .attribute = hwmon_power_cap,
+ .config = HWMON_P_CAP,
+ .update_interval = UPDATE_INTERVAL_100MS,
+ .read = peci_dimmpower_get_power_limit,
+ .write = peci_dimmpower_set_power_limit,
+ },
+ {
+ .attribute = hwmon_power_cap_max,
+ .config = HWMON_P_CAP_MAX,
+ .update_interval = UPDATE_INTERVAL_10S,
+ .read = peci_dimmpower_read_max_power,
+ .write = NULL,
+ },
+ {
+ .attribute = hwmon_power_cap_min,
+ .config = HWMON_P_CAP_MIN,
+ .update_interval = UPDATE_INTERVAL_10S,
+ .read = peci_dimmpower_read_min_power,
+ .write = NULL,
+ },
+ },
+};
+
+static struct peci_sensor_conf
+peci_dimmpower_energy_cfg[PECI_DIMMPOWER_ENERGY_CHANNEL_COUNT]
+ [PECI_DIMMPOWER_ENERGY_SENSOR_COUNT] = {
+ /* Channel 0 - Energy */
+ {
+ {
+ .attribute = hwmon_energy_input,
+ .config = HWMON_E_INPUT,
+ .update_interval = UPDATE_INTERVAL_100MS,
+ .read = peci_dimmpower_read_energy,
+ .write = NULL,
+ },
+ }
+};
+
+static bool
+peci_dimmpower_is_channel_valid(enum hwmon_sensor_types type,
+ int channel)
+{
+ if ((type == hwmon_power && channel < PECI_DIMMPOWER_POWER_CHANNEL_COUNT) ||
+ (type == hwmon_energy && channel < PECI_DIMMPOWER_ENERGY_CHANNEL_COUNT))
+ return true;
+
+ return false;
+}
+
+static int
+peci_dimmpower_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ if (!peci_dimmpower_is_channel_valid(type, channel))
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_power_label:
+ *str = peci_dimmpower_labels[PECI_DIMMPOWER_SENSOR_TYPE_POWER];
+ break;
+ case hwmon_energy_label:
+ *str = peci_dimmpower_labels[PECI_DIMMPOWER_SENSOR_TYPE_ENERGY];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+peci_dimmpower_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_dimmpower *priv = dev_get_drvdata(dev);
+ struct peci_sensor_conf *sensor_conf;
+ struct peci_sensor_data *sensor_data;
+ int ret;
+
+ if (!priv || !val)
+ return -EINVAL;
+
+ if (!peci_dimmpower_is_channel_valid(type, channel))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case hwmon_power:
+ ret = peci_sensor_get_ctx(attr, peci_dimmpower_power_cfg[channel],
+ &sensor_conf,
+ priv->power_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_dimmpower_power_cfg[channel]));
+ break;
+ case hwmon_energy:
+ ret = peci_sensor_get_ctx(attr, peci_dimmpower_energy_cfg[channel],
+ &sensor_conf,
+ priv->energy_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_dimmpower_energy_cfg[channel]));
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ if (sensor_conf->read) {
+ ret = sensor_conf->read(priv, sensor_conf, sensor_data);
+ if (!ret)
+ *val = (long)sensor_data->value;
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int
+peci_dimmpower_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct peci_dimmpower *priv = dev_get_drvdata(dev);
+ struct peci_sensor_conf *sensor_conf;
+ struct peci_sensor_data *sensor_data;
+ int ret;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!peci_dimmpower_is_channel_valid(type, channel))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case hwmon_power:
+ ret = peci_sensor_get_ctx(attr, peci_dimmpower_power_cfg[channel],
+ &sensor_conf,
+ priv->power_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_dimmpower_power_cfg[channel]));
+ break;
+ case hwmon_energy:
+ ret = peci_sensor_get_ctx(attr, peci_dimmpower_energy_cfg[channel],
+ &sensor_conf,
+ priv->energy_sensor_data_list[channel],
+ &sensor_data,
+ ARRAY_SIZE(peci_dimmpower_energy_cfg[channel]));
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ if (sensor_conf->write) {
+ ret = sensor_conf->write(priv, sensor_conf, sensor_data,
+ (s32)val);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static umode_t
+peci_dimmpower_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ struct peci_sensor_conf *sensor_conf;
+ umode_t mode = 0;
+ int ret;
+
+ if (!peci_dimmpower_is_channel_valid(type, channel))
+ return mode;
+
+ if (attr == hwmon_power_label || attr == hwmon_energy_label)
+ return 0444;
+
+ switch (type) {
+ case hwmon_power:
+ ret = peci_sensor_get_ctx(attr, peci_dimmpower_power_cfg[channel],
+ &sensor_conf, NULL, NULL,
+ ARRAY_SIZE(peci_dimmpower_power_cfg[channel]));
+ break;
+ case hwmon_energy:
+ ret = peci_sensor_get_ctx(attr, peci_dimmpower_energy_cfg[channel],
+ &sensor_conf, NULL, NULL,
+ ARRAY_SIZE(peci_dimmpower_energy_cfg[channel]));
+ break;
+ default:
+ return mode;
+ }
+
+ if (!ret) {
+ if (sensor_conf->read)
+ mode |= 0444;
+ if (sensor_conf->write)
+ mode |= 0200;
+ }
+
+ return mode;
+}
+
+static const struct hwmon_ops peci_dimmpower_ops = {
+ .is_visible = peci_dimmpower_is_visible,
+ .read_string = peci_dimmpower_read_string,
+ .read = peci_dimmpower_read,
+ .write = peci_dimmpower_write,
+};
+
+static void peci_dimmpower_sensor_init(struct peci_dimmpower *priv)
+{
+ int i, j;
+
+ mutex_init(&priv->energy_cache.lock);
+
+ for (i = 0; i < PECI_DIMMPOWER_POWER_CHANNEL_COUNT; i++) {
+ for (j = 0; j < PECI_DIMMPOWER_POWER_SENSOR_COUNT; j++)
+ mutex_init(&priv->power_sensor_data_list[i][j].lock);
+ }
+
+ for (i = 0; i < PECI_DIMMPOWER_ENERGY_CHANNEL_COUNT; i++) {
+ for (j = 0; j < PECI_DIMMPOWER_ENERGY_SENSOR_COUNT; j++)
+ mutex_init(&priv->energy_sensor_data_list[i][j].lock);
+ }
+}
+
+static int peci_dimmpower_probe(struct platform_device *pdev)
+{
+ struct peci_client_manager *mgr = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct peci_dimmpower *priv;
+ struct device *hwmon_dev;
+ u32 power_config_idx = 0;
+ u32 energy_config_idx = 0;
+ u32 cmd_mask;
+
+ cmd_mask = BIT(PECI_CMD_RD_PKG_CFG) | BIT(PECI_CMD_WR_PKG_CFG);
+ if ((mgr->client->adapter->cmd_mask & cmd_mask) != cmd_mask)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->mgr = mgr;
+ priv->dev = dev;
+
+ snprintf(priv->name, PECI_NAME_SIZE, "peci_dimmpower.cpu%d",
+ mgr->client->addr - PECI_BASE_ADDR);
+
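+ /* Build each channel's hwmon config mask by OR-ing the per-sensor config bits */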
+ priv->power_config[power_config_idx] = HWMON_P_LABEL |
+ peci_sensor_get_config(peci_dimmpower_power_cfg[power_config_idx],
+ ARRAY_SIZE(peci_dimmpower_power_cfg[power_config_idx]));
+
+ priv->energy_config[energy_config_idx] = HWMON_E_LABEL |
+ peci_sensor_get_config(peci_dimmpower_energy_cfg[energy_config_idx],
+ ARRAY_SIZE(peci_dimmpower_energy_cfg[energy_config_idx]));
+
+ priv->info[PECI_DIMMPOWER_SENSOR_TYPE_POWER] = &priv->power_info;
+ priv->power_info.type = hwmon_power;
+ priv->power_info.config = priv->power_config;
+
+ priv->info[PECI_DIMMPOWER_SENSOR_TYPE_ENERGY] = &priv->energy_info;
+ priv->energy_info.type = hwmon_energy;
+ priv->energy_info.config = priv->energy_config;
+
+ priv->chip.ops = &peci_dimmpower_ops;
+ priv->chip.info = priv->info;
+
+ peci_dimmpower_sensor_init(priv);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(priv->dev, priv->name,
+ priv, &priv->chip,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), priv->name);
+
+ return 0;
+}
+
+static const struct platform_device_id peci_dimmpower_ids[] = {
+ { .name = "peci-dimmpower", .driver_data = 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, peci_dimmpower_ids);
+
+static struct platform_driver peci_dimmpower_driver = {
+ .probe = peci_dimmpower_probe,
+ .id_table = peci_dimmpower_ids,
+ .driver = { .name = KBUILD_MODNAME, },
+};
+module_platform_driver(peci_dimmpower_driver);
+
+MODULE_AUTHOR("Zbigniew Lukwinski <zbigniew.lukwinski@linux.intel.com>");
+MODULE_DESCRIPTION("PECI dimmpower driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/peci-dimmtemp.c b/drivers/hwmon/peci-dimmtemp.c
new file mode 100644
index 000000000000..4b042c275ceb
--- /dev/null
+++ b/drivers/hwmon/peci-dimmtemp.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include "peci-hwmon.h"
+
+#define DIMM_MASK_CHECK_DELAY_JIFFIES msecs_to_jiffies(5000)
+#define DIMM_MASK_CHECK_RETRY_MAX -1 /* -1 = no timeout; a positive value N limits retries to N x 5 secs */
+#define DIMM_TEMP_MAX_DEFAULT 90000
+#define DIMM_TEMP_CRIT_DEFAULT 100000
+#define BIOS_RST_CPL4 BIT(4)
+
+struct peci_dimmtemp {
+ struct peci_client_manager *mgr;
+ struct device *dev;
+ char name[PECI_NAME_SIZE];
+ const struct cpu_gen_info *gen_info;
+ struct workqueue_struct *work_queue;
+ struct delayed_work work_handler;
+ struct peci_sensor_data temp[DIMM_NUMS_MAX];
+ long temp_max[DIMM_NUMS_MAX];
+ long temp_crit[DIMM_NUMS_MAX];
+ u32 dimm_mask;
+ int retry_count;
+ u32 temp_config[DIMM_NUMS_MAX + 1];
+ struct hwmon_channel_info temp_info;
+ const struct hwmon_channel_info *info[2];
+ struct hwmon_chip_info chip;
+ char **dimmtemp_label;
+};
+
+static const u8 support_model[] = {
+ INTEL_FAM6_HASWELL_X,
+ INTEL_FAM6_BROADWELL_X,
+ INTEL_FAM6_SKYLAKE_X,
+ INTEL_FAM6_SKYLAKE_XD,
+ INTEL_FAM6_ICELAKE_X,
+ INTEL_FAM6_ICELAKE_XD,
+};
+
+static inline int read_ddr_dimm_temp_config(struct peci_dimmtemp *priv,
+ int chan_rank,
+ u8 *cfg_data)
+{
+ return peci_client_read_package_config(priv->mgr,
+ PECI_MBX_INDEX_DDR_DIMM_TEMP,
+ chan_rank, cfg_data);
+}
+
+static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no)
+{
+ int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
+ int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
+ struct peci_rd_pci_cfg_local_msg rp_msg;
+ struct peci_rd_end_pt_cfg_msg re_msg;
+ u32 bios_reset_cpl_cfg;
+ u8 cfg_data[4];
+ u8 cpu_seg, cpu_bus;
+ int ret;
+
+ if (!peci_sensor_need_update(&priv->temp[dimm_no]))
+ return 0;
+
+ ret = read_ddr_dimm_temp_config(priv, chan_rank, cfg_data);
+ if (ret || cfg_data[dimm_order] == 0 || cfg_data[dimm_order] == 0xff)
+ return -ENODATA;
+
+ priv->temp[dimm_no].value = cfg_data[dimm_order] * 1000;
+
+ /*
+ * CPU can return invalid temperatures prior to BIOS-PCU handshake
+ * RST_CPL4 completion so filter the invalid readings out.
+ */
+ switch (priv->gen_info->model) {
+ case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_ICELAKE_XD:
+ re_msg.addr = priv->mgr->client->addr;
+ re_msg.msg_type = PECI_ENDPTCFG_TYPE_LOCAL_PCI;
+ re_msg.params.pci_cfg.seg = 0;
+ re_msg.params.pci_cfg.bus = 31;
+ re_msg.params.pci_cfg.device = 30;
+ re_msg.params.pci_cfg.function = 1;
+ re_msg.params.pci_cfg.reg = 0x94;
+ re_msg.rx_len = 4;
+ re_msg.domain_id = 0;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_END_PT_CFG, sizeof(re_msg), &re_msg);
+ if (ret || re_msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ bios_reset_cpl_cfg = le32_to_cpup((__le32 *)re_msg.data);
+ if (!(bios_reset_cpl_cfg & BIOS_RST_CPL4)) {
+ dev_dbg(priv->dev, "DRAM parameters aren't calibrated, BIOS_RESET_CPL_CFG: 0x%x\n",
+ bios_reset_cpl_cfg);
+ return -EAGAIN;
+ }
+
+ break;
+ default:
+ /* TODO: Check reset completion for other CPUs if needed */
+ break;
+ }
+
+ switch (priv->gen_info->model) {
+ case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_ICELAKE_XD:
+ re_msg.addr = priv->mgr->client->addr;
+ re_msg.rx_len = 4;
+ re_msg.msg_type = PECI_ENDPTCFG_TYPE_LOCAL_PCI;
+ re_msg.params.pci_cfg.seg = 0;
+ re_msg.params.pci_cfg.bus = 13;
+ re_msg.params.pci_cfg.device = 0;
+ re_msg.params.pci_cfg.function = 2;
+ re_msg.params.pci_cfg.reg = 0xd4;
+ re_msg.domain_id = 0;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_END_PT_CFG, sizeof(re_msg), &re_msg);
+ if (ret || re_msg.cc != PECI_DEV_CC_SUCCESS ||
+ !(re_msg.data[3] & BIT(7))) {
+ /* Use default or previous value */
+ ret = 0;
+ break;
+ }
+
+ re_msg.msg_type = PECI_ENDPTCFG_TYPE_LOCAL_PCI;
+ re_msg.params.pci_cfg.reg = 0xd0;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_END_PT_CFG, sizeof(re_msg), &re_msg);
+ if (ret || re_msg.cc != PECI_DEV_CC_SUCCESS) {
+ /* Use default or previous value */
+ ret = 0;
+ break;
+ }
+
+ cpu_seg = re_msg.data[2];
+ cpu_bus = re_msg.data[0];
+
+ re_msg.addr = priv->mgr->client->addr;
+ re_msg.msg_type = PECI_ENDPTCFG_TYPE_MMIO;
+ re_msg.params.mmio.seg = cpu_seg;
+ re_msg.params.mmio.bus = cpu_bus;
+ /*
+ * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
+ * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1
+ * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2
+ * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3
+ * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4
+ * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5
+ * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6
+ * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7
+ */
+ re_msg.params.mmio.device = 0x1a + chan_rank / 2;
+ re_msg.params.mmio.function = 0;
+ re_msg.params.mmio.bar = 0;
+ re_msg.params.mmio.addr_type = PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q;
+ re_msg.params.mmio.offset = 0x224e0 + dimm_order * 4;
+ if (chan_rank % 2)
+ re_msg.params.mmio.offset += 0x4000;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_END_PT_CFG, sizeof(re_msg), &re_msg);
+ if (ret || re_msg.cc != PECI_DEV_CC_SUCCESS ||
+ re_msg.data[1] == 0 || re_msg.data[2] == 0) {
+ /* Use default or previous value */
+ ret = 0;
+ break;
+ }
+
+ priv->temp_max[dimm_no] = re_msg.data[1] * 1000;
+ priv->temp_crit[dimm_no] = re_msg.data[2] * 1000;
+ break;
+ case INTEL_FAM6_SKYLAKE_X:
+ rp_msg.addr = priv->mgr->client->addr;
+ rp_msg.bus = 2;
+ /*
+ * Device 10, Function 2: IMC 0 channel 0 -> rank 0
+ * Device 10, Function 6: IMC 0 channel 1 -> rank 1
+ * Device 11, Function 2: IMC 0 channel 2 -> rank 2
+ * Device 12, Function 2: IMC 1 channel 0 -> rank 3
+ * Device 12, Function 6: IMC 1 channel 1 -> rank 4
+ * Device 13, Function 2: IMC 1 channel 2 -> rank 5
+ */
+ rp_msg.device = 10 + chan_rank / 3 * 2 +
+ (chan_rank % 3 == 2 ? 1 : 0);
+ rp_msg.function = chan_rank % 3 == 1 ? 6 : 2;
+ rp_msg.reg = 0x120 + dimm_order * 4;
+ rp_msg.rx_len = 4;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, sizeof(rp_msg), &rp_msg);
+ if (ret || rp_msg.cc != PECI_DEV_CC_SUCCESS ||
+ rp_msg.pci_config[1] == 0 || rp_msg.pci_config[2] == 0) {
+ /* Use default or previous value */
+ ret = 0;
+ break;
+ }
+
+ priv->temp_max[dimm_no] = rp_msg.pci_config[1] * 1000;
+ priv->temp_crit[dimm_no] = rp_msg.pci_config[2] * 1000;
+ break;
+ case INTEL_FAM6_SKYLAKE_XD:
+ rp_msg.addr = priv->mgr->client->addr;
+ rp_msg.bus = 2;
+ /*
+ * Device 10, Function 2: IMC 0 channel 0 -> rank 0
+ * Device 10, Function 6: IMC 0 channel 1 -> rank 1
+ * Device 12, Function 2: IMC 1 channel 0 -> rank 2
+ * Device 12, Function 6: IMC 1 channel 1 -> rank 3
+ */
+ rp_msg.device = 10 + chan_rank / 2 * 2;
+ rp_msg.function = (chan_rank % 2) ? 6 : 2;
+ rp_msg.reg = 0x120 + dimm_order * 4;
+ rp_msg.rx_len = 4;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, sizeof(rp_msg), &rp_msg);
+ if (ret || rp_msg.cc != PECI_DEV_CC_SUCCESS ||
+ rp_msg.pci_config[1] == 0 || rp_msg.pci_config[2] == 0) {
+ /* Use default or previous value */
+ ret = 0;
+ break;
+ }
+
+ priv->temp_max[dimm_no] = rp_msg.pci_config[1] * 1000;
+ priv->temp_crit[dimm_no] = rp_msg.pci_config[2] * 1000;
+ break;
+ case INTEL_FAM6_HASWELL_X:
+ case INTEL_FAM6_BROADWELL_X:
+ rp_msg.addr = priv->mgr->client->addr;
+ rp_msg.bus = 1;
+ /*
+ * Device 20, Function 0: IMC 0 channel 0 -> rank 0
+ * Device 20, Function 1: IMC 0 channel 1 -> rank 1
+ * Device 21, Function 0: IMC 0 channel 2 -> rank 2
+ * Device 21, Function 1: IMC 0 channel 3 -> rank 3
+ * Device 23, Function 0: IMC 1 channel 0 -> rank 4
+ * Device 23, Function 1: IMC 1 channel 1 -> rank 5
+ * Device 24, Function 0: IMC 1 channel 2 -> rank 6
+ * Device 24, Function 1: IMC 1 channel 3 -> rank 7
+ */
+ rp_msg.device = 20 + chan_rank / 2 + chan_rank / 4;
+ rp_msg.function = chan_rank % 2;
+ rp_msg.reg = 0x120 + dimm_order * 4;
+ rp_msg.rx_len = 4;
+
+ ret = peci_command(priv->mgr->client->adapter,
+ PECI_CMD_RD_PCI_CFG_LOCAL, sizeof(rp_msg), &rp_msg);
+ if (ret || rp_msg.cc != PECI_DEV_CC_SUCCESS ||
+ rp_msg.pci_config[1] == 0 || rp_msg.pci_config[2] == 0) {
+ /* Use default or previous value */
+ ret = 0;
+ break;
+ }
+
+ priv->temp_max[dimm_no] = rp_msg.pci_config[1] * 1000;
+ priv->temp_crit[dimm_no] = rp_msg.pci_config[2] * 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ peci_sensor_mark_updated(&priv->temp[dimm_no]);
+
+ return 0;
+}
+
+static int dimmtemp_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(dev);
+
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = (const char *)priv->dimmtemp_label[channel];
+
+ return 0;
+}
+
+static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = get_dimm_temp(priv, channel);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = priv->temp[channel].value;
+ break;
+ case hwmon_temp_max:
+ *val = priv->temp_max[channel];
+ break;
+ case hwmon_temp_crit:
+ *val = priv->temp_crit[channel];
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static umode_t dimmtemp_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct peci_dimmtemp *priv = data;
+
+ if (priv->temp_config[channel] & BIT(attr) &&
+ priv->dimm_mask & BIT(channel))
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops dimmtemp_ops = {
+ .is_visible = dimmtemp_is_visible,
+ .read_string = dimmtemp_read_string,
+ .read = dimmtemp_read,
+};
+
+static int check_populated_dimms(struct peci_dimmtemp *priv)
+{
+ u32 chan_rank_max = priv->gen_info->chan_rank_max;
+ u32 dimm_idx_max = priv->gen_info->dimm_idx_max;
+ int chan_rank;
+ u8 cfg_data[4];
+
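+ /*
+ * Each set bit in dimm_mask corresponds to one populated DIMM:
+ * bit index = chan_rank * dimm_idx_max + DIMM index within the rank.
+ */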
+ for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
+ int ret, idx;
+
+ ret = read_ddr_dimm_temp_config(priv, chan_rank, cfg_data);
+ if (ret) {
+ if (ret == -EAGAIN)
+ continue;
+
+ priv->dimm_mask = 0;
+ return ret;
+ }
+
+ for (idx = 0; idx < dimm_idx_max; idx++) {
+ if (cfg_data[idx]) {
+ uint chan = chan_rank * dimm_idx_max + idx;
+ priv->dimm_mask |= BIT(chan);
+ priv->temp_max[chan] = DIMM_TEMP_MAX_DEFAULT;
+ priv->temp_crit[chan] = DIMM_TEMP_CRIT_DEFAULT;
+ }
+ }
+ }
+
+ if (!priv->dimm_mask)
+ return -EAGAIN;
+
+ dev_dbg(priv->dev, "Scanned populated DIMMs: 0x%x\n", priv->dimm_mask);
+
+ return 0;
+}
+
+static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan)
+{
+ int rank, idx;
+
+ priv->dimmtemp_label[chan] = devm_kzalloc(priv->dev,
+ PECI_HWMON_LABEL_STR_LEN,
+ GFP_KERNEL);
+ if (!priv->dimmtemp_label[chan])
+ return -ENOMEM;
+
+ rank = chan / priv->gen_info->dimm_idx_max;
+ idx = chan % priv->gen_info->dimm_idx_max;
+
+ snprintf(priv->dimmtemp_label[chan], PECI_HWMON_LABEL_STR_LEN,
+ "DIMM %c%d", 'A' + rank, idx + 1);
+
+ return 0;
+}
+
+static int create_dimm_temp_info(struct peci_dimmtemp *priv)
+{
+ int ret, i, config_idx, channels;
+ struct device *dev;
+
+ ret = check_populated_dimms(priv);
+ if (ret) {
+ if (ret == -EAGAIN) {
+ if (DIMM_MASK_CHECK_RETRY_MAX == -1 ||
+ priv->retry_count < DIMM_MASK_CHECK_RETRY_MAX) {
+ queue_delayed_work(priv->work_queue,
+ &priv->work_handler,
+ DIMM_MASK_CHECK_DELAY_JIFFIES);
+ priv->retry_count++;
+ dev_dbg(priv->dev,
+ "Deferred DIMM temp info creation\n");
+ } else {
+ dev_err(priv->dev,
+ "Timeout DIMM temp info creation\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ return ret;
+ }
+
+ channels = priv->gen_info->chan_rank_max *
+ priv->gen_info->dimm_idx_max;
+
+ priv->dimmtemp_label = devm_kzalloc(priv->dev,
+ channels * sizeof(char *),
+ GFP_KERNEL);
+ if (!priv->dimmtemp_label)
+ return -ENOMEM;
+
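+ /*
+ * Fill temp_config for every channel up to the highest populated DIMM
+ * so that hwmon channel numbers match the dimm_mask bit positions.
+ */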
+ for (i = 0, config_idx = 0; i < channels; i++)
+ if (priv->dimm_mask & BIT(i)) {
+ while (i >= config_idx)
+ priv->temp_config[config_idx++] =
+ HWMON_T_LABEL | HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_CRIT;
+
+ ret = create_dimm_temp_label(priv, i);
+ if (ret)
+ return ret;
+ }
+
+ priv->chip.ops = &dimmtemp_ops;
+ priv->chip.info = priv->info;
+
+ priv->info[0] = &priv->temp_info;
+
+ priv->temp_info.type = hwmon_temp;
+ priv->temp_info.config = priv->temp_config;
+
+ dev = devm_hwmon_device_register_with_info(priv->dev,
+ priv->name,
+ priv,
+ &priv->chip,
+ NULL);
+ if (IS_ERR(dev)) {
+ dev_err(priv->dev, "Failed to register hwmon device\n");
+ return PTR_ERR(dev);
+ }
+
+ dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name);
+
+ return 0;
+}
+
+static void create_dimm_temp_info_delayed(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct peci_dimmtemp *priv = container_of(dwork, struct peci_dimmtemp,
+ work_handler);
+ int ret;
+
+ ret = create_dimm_temp_info(priv);
+ if (ret && ret != -EAGAIN)
+ dev_dbg(priv->dev, "Failed to create DIMM temp info\n");
+}
+
+static int peci_dimmtemp_probe(struct platform_device *pdev)
+{
+ struct peci_client_manager *mgr = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct peci_dimmtemp *priv;
+ int ret, i;
+
+ if ((mgr->client->adapter->cmd_mask &
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG))) !=
+ (BIT(PECI_CMD_GET_TEMP) | BIT(PECI_CMD_RD_PKG_CFG)))
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(support_model); i++) {
+ if (mgr->gen_info->model == support_model[i])
+ break;
+ }
+ if (i == ARRAY_SIZE(support_model))
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->mgr = mgr;
+ priv->dev = dev;
+ priv->gen_info = mgr->gen_info;
+
+ snprintf(priv->name, PECI_NAME_SIZE, "peci_dimmtemp.cpu%d",
+ priv->mgr->client->addr - PECI_BASE_ADDR);
+
+ priv->work_queue = alloc_ordered_workqueue(priv->name, 0);
+ if (!priv->work_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&priv->work_handler, create_dimm_temp_info_delayed);
+
+ ret = create_dimm_temp_info(priv);
+ if (ret && ret != -EAGAIN) {
+ dev_dbg(dev, "Failed to create DIMM temp info\n");
+ goto err_free_wq;
+ }
+
+ return 0;
+
+err_free_wq:
+ destroy_workqueue(priv->work_queue);
+ return ret;
+}
+
+static int peci_dimmtemp_remove(struct platform_device *pdev)
+{
+ struct peci_dimmtemp *priv = dev_get_drvdata(&pdev->dev);
+
+ cancel_delayed_work_sync(&priv->work_handler);
+ destroy_workqueue(priv->work_queue);
+
+ return 0;
+}
+
+static const struct platform_device_id peci_dimmtemp_ids[] = {
+ { .name = "peci-dimmtemp", .driver_data = 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, peci_dimmtemp_ids);
+
+static struct platform_driver peci_dimmtemp_driver = {
+ .probe = peci_dimmtemp_probe,
+ .remove = peci_dimmtemp_remove,
+ .id_table = peci_dimmtemp_ids,
+ .driver = { .name = KBUILD_MODNAME, },
+};
+module_platform_driver(peci_dimmtemp_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI dimmtemp driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/peci-hwmon.h b/drivers/hwmon/peci-hwmon.h
new file mode 100644
index 000000000000..c6947346cf77
--- /dev/null
+++ b/drivers/hwmon/peci-hwmon.h
@@ -0,0 +1,659 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2020 Intel Corporation */
+
+#ifndef __PECI_HWMON_H
+#define __PECI_HWMON_H
+
+#include <linux/peci.h>
+#include <asm/div64.h>
+
+#define TEMP_TYPE_PECI 6 /* Sensor type 6: Intel PECI */
+#define UPDATE_INTERVAL_DEFAULT HZ
+#define UPDATE_INTERVAL_100MS (HZ / 10)
+#define UPDATE_INTERVAL_10S (HZ * 10)
+
+#define PECI_HWMON_LABEL_STR_LEN 10
+
+/**
+ * struct peci_sensor_data - PECI sensor information
+ * @valid: flag to indicate the sensor value is valid
+ * @value: signed sensor value in milli units
+ * @uvalue: unsigned sensor value, e.g. for raw energy counter readings
+ * @last_updated: time of the last update in jiffies
+ * @lock: mutex protecting access to the sensor data
+ */
+struct peci_sensor_data {
+ uint valid;
+ union {
+ s32 value;
+ u32 uvalue;
+ };
+ ulong last_updated;
+ struct mutex lock; /* protect sensor access */
+};
+
+/**
+ * peci_sensor_need_update - check whether sensor update is needed or not
+ * @sensor: pointer to sensor data struct
+ *
+ * Return: true if update is needed, false if not.
+ */
+static inline bool peci_sensor_need_update(struct peci_sensor_data *sensor)
+{
+ return !sensor->valid ||
+ time_after(jiffies,
+ sensor->last_updated + UPDATE_INTERVAL_DEFAULT);
+}
+
+/**
+ * peci_sensor_need_update_with_time - check whether sensor update is needed
+ * or not
+ * @sensor: pointer to sensor data struct
+ * @update_interval: update interval to check
+ *
+ * Return: true if update is needed, false if not.
+ */
+static inline bool
+peci_sensor_need_update_with_time(struct peci_sensor_data *sensor,
+ ulong update_interval)
+{
+ return !sensor->valid ||
+ time_after(jiffies, sensor->last_updated + update_interval);
+}
+
+/**
+ * peci_sensor_mark_updated - mark the sensor is updated
+ * @sensor: pointer to sensor data struct
+ */
+static inline void peci_sensor_mark_updated(struct peci_sensor_data *sensor)
+{
+ sensor->valid = 1;
+ sensor->last_updated = jiffies;
+}
+
+/**
+ * peci_sensor_mark_updated_with_time - mark the sensor is updated
+ * @sensor: pointer to sensor data struct
+ * @jif: jiffies value to update with
+ */
+static inline void
+peci_sensor_mark_updated_with_time(struct peci_sensor_data *sensor, ulong jif)
+{
+ sensor->valid = 1;
+ sensor->last_updated = jif;
+}
+
+/**
+ * struct peci_sensor_conf - PECI sensor information
+ * @attribute: Sensor attribute
+ * @config: Part of the channel parameters contributed by a single sensor
+ * @update_interval: time in jiffies needs to elapse to read sensor again
+ * @read: Read callback for data attributes. Mandatory if readable
+ * data attributes are present.
+ * Parameters are:
+ * @module_ctx: Pointer to the peci module context
+ * @sensor_conf: Pointer to sensor configuration object
+ * @sensor_data: Pointer to sensor data object
+ * @val: Pointer to returned value
+ * The function returns 0 on success or a negative error number.
+ * @write: Write callback for data attributes. Mandatory if writeable
+ * data attributes are present.
+ * Parameters are:
+ * @module_ctx: Pointer to the peci module context
+ * @sensor_conf: Pointer to sensor configuration object
+ * @sensor_data: Pointer to sensor data object
+ * @val: Value to write
+ * The function returns 0 on success or a negative error number.
+ */
+struct peci_sensor_conf {
+ const s32 attribute;
+ const u32 config;
+ const ulong update_interval;
+
+ int (*const read)(void *priv, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data);
+ int (*const write)(void *priv, struct peci_sensor_conf *sensor_conf,
+ struct peci_sensor_data *sensor_data, s32 val);
+};
+
+/**
+ * peci_sensor_get_config - get peci sensor configuration for provided channel
+ * @sensors: Sensors list
+ * @sensor_count: Sensors count
+ *
+ * Return: sensor configuration
+ */
+static inline u32 peci_sensor_get_config(struct peci_sensor_conf sensors[],
+ u8 sensor_count)
+{
+ u32 config = 0u;
+ int iter;
+
+ for (iter = 0; iter < sensor_count; ++iter)
+ config |= sensors[iter].config;
+
+ return config;
+}
+
+/**
+ * peci_sensor_get_ctx - get peci sensor context - both configuration and data
+ * @attribute: Sensor attribute
+ * @sensor_conf_list: Sensors configuration object list
+ * @sensor_conf: Sensor configuration object found
+ * @sensor_data_list: Sensors data object list, may be NULL in case there is no
+ * need to find a sensor data object
+ * @sensor_data: Sensor data object found, may be NULL in case there is no need
+ * to find a sensor data object
+ * @sensor_count: Sensor count
+ *
+ * Return: 0 on success or -EOPNOTSUPP in case sensor attribute not found
+ */
+static inline int
+peci_sensor_get_ctx(s32 attribute, struct peci_sensor_conf sensor_conf_list[],
+ struct peci_sensor_conf **sensor_conf,
+ struct peci_sensor_data sensor_data_list[],
+ struct peci_sensor_data **sensor_data,
+ const u8 sensor_count)
+{
+ int iter;
+
+ for (iter = 0; iter < sensor_count; ++iter) {
+ if (attribute == sensor_conf_list[iter].attribute) {
+ *sensor_conf = &sensor_conf_list[iter];
+ if (sensor_data_list && sensor_data)
+ *sensor_data = &sensor_data_list[iter];
+ return 0;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Value for the most common parameter used for PCS accessing */
+#define PECI_PCS_PARAM_ZERO 0x0000u
+
+#define PECI_PCS_REGISTER_SIZE 4u /* PCS register size in bytes */
+
+/* PPL1 value to PPL2 value conversion macro */
+#define PECI_PCS_PPL1_TO_PPL2(ppl1_value) ((((u32)(ppl1_value)) * 12uL) / 10uL)
+
+#define PECI_PCS_PPL1_TIME_WINDOW 250 /* PPL1 Time Window value in ms */
+
+#define PECI_PCS_PPL2_TIME_WINDOW 10 /* PPL2 Time Window value in ms */
+
+/**
+ * union peci_pkg_power_sku_unit - PECI Package Power Unit PCS
+ * This register corresponds to the MSR@606h - MSR_RAPL_POWER_UNIT
+ * Accessing over PECI: PCS=0x1E, Parameter=0x0000
+ * @value: PCS register value
+ * @bits: PCS register bits
+ * @pwr_unit: Bits [3:0] - Power Unit
+ * @rsvd0: Bits [7:4]
+ * @eng_unit: Bits [12:8] - Energy Unit
+ * @rsvd1: Bits [15:13]
+ * @tim_unit: Bits [19:16] - Time Unit
+ * @rsvd2: Bits [31:20]
+ */
+union peci_pkg_power_sku_unit {
+ u32 value;
+ struct {
+ u32 pwr_unit : 4;
+ u32 rsvd0 : 4;
+ u32 eng_unit : 5;
+ u32 rsvd1 : 3;
+ u32 tim_unit : 4;
+ u32 rsvd2 : 12;
+ } __attribute__((__packed__)) bits;
+} __attribute__((__packed__));
+
+static_assert(sizeof(union peci_pkg_power_sku_unit) == PECI_PCS_REGISTER_SIZE);
+
+/**
+ * union peci_package_power_info_low - Platform and Package Power SKU (Low) PCS
+ * This PCS corresponds to the MSR@614h - PACKAGE_POWER_SKU, bits [31:0]
+ * Accessing over PECI: PCS=0x1C, parameter=0x00FF
+ * @value: PCS register value
+ * @bits: PCS register bits
+ * @pkg_tdp: Bits [14:0] - TDP Package Power
+ * @rsvd0: Bits [15:15]
+ * @pkg_min_pwr: Bits [30:16] - Minimal Package Power
+ * @rsvd1: Bits [31:31]
+ */
+union peci_package_power_info_low {
+ u32 value;
+ struct {
+ u32 pkg_tdp : 15;
+ u32 rsvd0 : 1;
+ u32 pkg_min_pwr : 15;
+ u32 rsvd1 : 1;
+ } __attribute__((__packed__)) bits;
+} __attribute__((__packed__));
+
+static_assert(sizeof(union peci_package_power_info_low) ==
+ PECI_PCS_REGISTER_SIZE);
+
+/**
+ * union peci_package_power_limit_high - Package Power Limit 2 PCS
+ * This PCS corresponds to the MSR@610h - PACKAGE_RAPL_LIMIT, bits [63:32]
+ * Accessing over PECI: PCS=0x1B, Parameter=0x0000
+ * @value: PCS register value
+ * @bits: PCS register bits
+ * @pwr_lim_2: Bits [14:0] - Power Limit 2
+ * @pwr_lim_2_en: Bits [15:15] - Power Limit 2 Enable
+ * @pwr_clmp_lim_2:Bits [16:16] - Package Clamping Limitation 2
+ * @pwr_lim_2_time:Bits [23:17] - Power Limit 2 Time Window
+ * @rsvd0: Bits [31:24]
+ */
+union peci_package_power_limit_high {
+ u32 value;
+ struct {
+ u32 pwr_lim_2 : 15;
+ u32 pwr_lim_2_en : 1;
+ u32 pwr_clmp_lim_2 : 1;
+ u32 pwr_lim_2_time : 7;
+ u32 rsvd0 : 8;
+ } __attribute__((__packed__)) bits;
+} __attribute__((__packed__));
+
+static_assert(sizeof(union peci_package_power_limit_high) ==
+ PECI_PCS_REGISTER_SIZE);
+
+/**
+ * union peci_package_power_limit_low - Package Power Limit 1 PCS
+ * This PCS corresponds to the MSR@610h - PACKAGE_RAPL_LIMIT, bits [31:0]
+ * Accessing over PECI: PCS=0x1A, Parameter=0x0000
+ * @value: PCS register value
+ * @bits: PCS register bits
+ * @pwr_lim_1: Bits [14:0] - Power Limit 1
+ * @pwr_lim_1_en: Bits [15:15] - Power Limit 1 Enable
+ * @pwr_clmp_lim_1:Bits [16:16] - Package Clamping Limitation 1
+ * @pwr_lim_1_time:Bits [23:17] - Power Limit 1 Time Window
+ * @rsvd0: Bits [31:24]
+ */
+union peci_package_power_limit_low {
+ u32 value;
+ struct {
+ u32 pwr_lim_1 : 15;
+ u32 pwr_lim_1_en : 1;
+ u32 pwr_clmp_lim_1 : 1;
+ u32 pwr_lim_1_time : 7;
+ u32 rsvd0 : 8;
+ } __attribute__((__packed__)) bits;
+} __attribute__((__packed__));
+
+static_assert(sizeof(union peci_package_power_limit_low) ==
+ PECI_PCS_REGISTER_SIZE);
+
+/**
+ * union peci_dram_power_info_low - DRAM Power Info low PCS
+ * This PCS corresponds to the MSR@61Ch - MSR_DRAM_POWER_INFO, bits [31:0]
+ * Accessing over PECI: PCS=0x24, Parameter=0x0000
+ * @value: PCS register value
+ * @bits: PCS register bits
+ * @tdp: Bits [14:0] - Spec DRAM Power
+ * @rsvd0: Bits [15:15]
+ * @min_pwr: Bits [30:16] - Minimal DRAM Power
+ * @rsvd1: Bits [31:31]
+ */
+union peci_dram_power_info_low {
+ u32 value;
+ struct {
+ u32 tdp : 15;
+ u32 rsvd0 : 1;
+ u32 min_pwr : 15;
+ u32 rsvd1 : 1;
+ } __attribute__((__packed__)) bits;
+} __attribute__((__packed__));
+
+static_assert(sizeof(union peci_dram_power_info_low) == PECI_PCS_REGISTER_SIZE);
+
+/**
+ * union peci_dram_power_limit - DRAM Power Limit PCS
+ * This PCS corresponds to the MSR@618h - DRAM_PLANE_POWER_LIMIT, bits [31:0]
+ * Accessing over PECI: PCS=0x22, Parameter=0x0000
+ * @value: PCS register value
+ * @bits: PCS register bits
+ * @pp_pwr_lim: Bits [14:0] - Power Limit[0] for DDR domain,
+ * format: U11.3
+ * @pwr_lim_ctrl_en:Bits [15:15] - Power Limit[0] enable bit for
+ * DDR domain
+ * @rsvd0: Bits [16:16]
+ * @ctrl_time_win: Bits [23:17] - Power Limit[0] time window for
+ * DDR domain
+ * @rsvd1: Bits [31:24]
+ */
+union peci_dram_power_limit {
+ u32 value;
+ struct {
+ u32 pp_pwr_lim : 15;
+ u32 pwr_lim_ctrl_en : 1;
+ u32 rsvd0 : 1;
+ u32 ctrl_time_win : 7;
+ u32 rsvd1 : 8;
+ } __attribute__((__packed__)) bits;
+} __attribute__((__packed__));
+
+static_assert(sizeof(union peci_dram_power_limit) == PECI_PCS_REGISTER_SIZE);
+
+/**
+ * peci_pcs_xn_to_uunits - function converting value in units in x.N format to
+ * micro units (microjoules, microseconds, microdegrees) in regular format
+ * @x_n_value: Value in units in x.n format
+ * @n: n factor for x.n format
+ *
+ * Return: value in micro units (microjoules, microseconds, microdegrees)
+ * in regular format
+ */
+static inline u64 peci_pcs_xn_to_uunits(u32 x_n_value, u8 n)
+{
+ u64 mx_n_value = (u64)x_n_value * 1000000uLL;
+
+ return mx_n_value >> n;
+}
+
+/**
+ * peci_pcs_xn_to_munits - function converting value in units in x.N format to
+ * milli units (millijoules, milliseconds, millidegrees) in regular format
+ * @x_n_value: Value in units in x.n format
+ * @n: n factor for x.n format
+ *
+ * Return: value in milli units (millijoules, milliseconds, millidegrees)
+ * in regular format
+ */
+static inline u64 peci_pcs_xn_to_munits(u32 x_n_value, u8 n)
+{
+ u64 mx_n_value = (u64)x_n_value * 1000uLL;
+
+ return mx_n_value >> n;
+}
+
+/**
+ * peci_pcs_munits_to_xn - function converting value in milli units
+ * (millijoules,milliseconds, millidegrees) in regular format to value in units
+ * in x.n format
+ * @mu_value: Value in milli units (millijoules, milliseconds, millidegrees)
+ * @n: n factor for x.n format, assumed here maximal value for n is 32
+ *
+ * Return: value in units in x.n format
+ */
+static inline u32 peci_pcs_munits_to_xn(u32 mu_value, u8 n)
+{
+ /* Convert value in milli units (regular format) to the x.n format */
+ u64 mx_n_value = (u64)mu_value << n;
+ /* Convert milli units (x.n format) to units (x.n format) */
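+ /* do_div() is used for the 64-bit division required on 32-bit targets */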
+ if (mx_n_value > (u64)U32_MAX) {
+ do_div(mx_n_value, 1000uL);
+ return (u32)mx_n_value;
+ } else {
+ return (u32)mx_n_value / 1000uL;
+ }
+}
+
+/**
+ * peci_pcs_read - read PCS register
+ * @peci_mgr: PECI client manager handle
+ * @index: PCS index
+ * @parameter: PCS parameter
+ * @reg: Pointer to the variable the read value is going to be put in
+ *
+ * Return: 0 if succeeded,
+ * -EINVAL if there are null pointers among arguments,
+ * other values in case of other errors.
+ */
+static inline int peci_pcs_read(struct peci_client_manager *peci_mgr, u8 index,
+ u16 parameter, u32 *reg)
+{
+ u32 pcs_reg;
+ int ret;
+
+ if (!reg)
+ return -EINVAL;
+
+ ret = peci_client_read_package_config(peci_mgr, index, parameter,
+ (u8 *)&pcs_reg);
+ if (!ret)
+ *reg = le32_to_cpup((__le32 *)&pcs_reg);
+
+ return ret;
+}
+
+/**
+ * peci_pcs_write - write PCS register
+ * @peci_mgr: PECI client manager handle
+ * @index: PCS index
+ * @parameter: PCS parameter
+ * @reg: Variable which value is going to be written to the PCS
+ *
+ * Return: 0 if succeeded, other values in case of an error.
+ */
+static inline int peci_pcs_write(struct peci_client_manager *peci_mgr, u8 index,
+ u16 parameter, u32 reg)
+{
+ int ret;
+
+ ret = peci_client_write_package_config(peci_mgr, index, parameter, reg);
+
+ return ret;
+}
+
+/**
+ * peci_pcs_calc_pwr_from_eng - calculate power (in milliwatts) based on
+ * two energy readings
+ * @dev: Device handle
+ * @prev_energy: Previous energy reading context with raw energy counter value
+ * @energy: Current energy reading context with raw energy counter value
+ * @unit: Calculation factor
+ * @power_in_mW: Pointer to the variable the calculation result is going to
+ * be put in
+ *
+ * Return: 0 if succeeded,
+ * -EINVAL if there are null pointers among arguments,
+ * -EAGAIN if calculation is skipped.
+ */
+static inline int peci_pcs_calc_pwr_from_eng(struct device *dev,
+ struct peci_sensor_data *prev_energy,
+ struct peci_sensor_data *energy,
+ u32 unit, s32 *power_in_mW)
+{
+ ulong elapsed;
+ int ret;
+
+ elapsed = energy->last_updated - prev_energy->last_updated;
+
+ dev_dbg(dev, "raw energy before %u, raw energy now %u, unit %u, jiffies elapsed %lu\n",
+ prev_energy->uvalue, energy->uvalue, unit, elapsed);
+
+ /*
+ * TODO: Remove the check of the current energy value against 0.
+ * During a host reset the CPU resets its energy counters; hwmon treats
+ * such a case as a proper energy read (counter overflow) and calculates
+ * an invalid power consumption. Currently hwmon cannot determine whether
+ * the CPU was reset; stop treating 0 as an invalid value once a proper
+ * mechanism is implemented.
+ *
+ * Don't calculate the average power if this is the first counter read,
+ * if the last counter read was more than 60 minutes ago (which guarantees
+ * that jiffies did not wrap and the power calculation does not overflow
+ * or underflow), or if the energy read time did not change.
+ */
+ if (energy->uvalue > 0 && prev_energy->last_updated > 0 &&
+ elapsed < (HZ * 3600) && elapsed) {
+ u32 energy_consumed;
+ u64 energy_consumed_in_mJ;
+ u64 energy_by_jiffies;
+
+ if (energy->uvalue >= prev_energy->uvalue)
+ energy_consumed = energy->uvalue - prev_energy->uvalue;
+ else
+ energy_consumed = (U32_MAX - prev_energy->uvalue) +
+ energy->uvalue + 1u;
+
+ energy_consumed_in_mJ =
+ peci_pcs_xn_to_munits(energy_consumed, unit);
+ energy_by_jiffies = energy_consumed_in_mJ * HZ;
+
+ if (energy_by_jiffies > (u64)U32_MAX) {
+ do_div(energy_by_jiffies, elapsed);
+ *power_in_mW = (long)energy_by_jiffies;
+ } else {
+ *power_in_mW = (u32)energy_by_jiffies / elapsed;
+ }
+
+ dev_dbg(dev, "raw energy consumed %u, scaled energy consumed %llumJ, scaled power %dmW\n",
+ energy_consumed, energy_consumed_in_mJ, *power_in_mW);
+
+ ret = 0;
+ } else {
+ dev_dbg(dev, "skipping calculate power, try again\n");
+ *power_in_mW = 0;
+ ret = -EAGAIN;
+ }
+
+ prev_energy->uvalue = energy->uvalue;
+ peci_sensor_mark_updated_with_time(prev_energy, energy->last_updated);
+
+ return ret;
+}
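+
+/*
+ * Illustrative calculation (example numbers only, not from hardware): with
+ * unit = 14, a raw counter delta of 81920 corresponds to 5000 mJ; if the
+ * two readings are one second (HZ jiffies) apart, the reported power is
+ * 5000 mW.
+ */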
+
+/**
+ * peci_pcs_calc_acc_eng - calculate accumulated energy (in microjoules) based
+ * on two energy readings
+ * @dev: Device handle
+ * @prev_energy: Previous energy reading context with raw energy counter value
+ * @curr_energy: Current energy reading context with raw energy counter value
+ * @unit: Calculation factor
+ * @acc_energy_in_uJ: Pointer to the variable with cumulative energy counter
+ *
+ * Return: 0 if succeeded,
+ * -EINVAL if there are null pointers among arguments,
+ * -EAGAIN if calculation is skipped.
+ */
+static inline int peci_pcs_calc_acc_eng(struct device *dev,
+ struct peci_sensor_data *prev_energy,
+ struct peci_sensor_data *curr_energy,
+ u32 unit, u32 *acc_energy_in_uJ)
+{
+ ulong elapsed;
+ int ret;
+
+ elapsed = curr_energy->last_updated - prev_energy->last_updated;
+
+ dev_dbg(dev, "raw energy before %u, raw energy now %u, unit %u, jiffies elapsed %lu\n",
+ prev_energy->uvalue, curr_energy->uvalue, unit, elapsed);
+
+ /*
+ * TODO: Remove the check of the current energy value against 0.
+ * During a host reset the CPU resets its energy counters; hwmon treats
+ * such a case as a proper energy read (counter overflow) and calculates
+ * an invalid energy increase. Currently hwmon cannot determine whether
+ * the CPU was reset; stop treating 0 as an invalid value once a proper
+ * mechanism is implemented.
+ *
+ * Don't calculate the cumulative energy if this is the first counter read
+ * or if the last counter read was more than 17 minutes ago (which
+ * guarantees that jiffies and the raw energy counter did not wrap and the
+ * energy calculation does not overflow or underflow).
+ */
+ if (curr_energy->uvalue > 0 && prev_energy->last_updated > 0 &&
+ elapsed < (HZ * 17 * 60)) {
+ u32 energy_consumed;
+ u64 energy_consumed_in_uJ;
+
+ if (curr_energy->uvalue >= prev_energy->uvalue)
+ energy_consumed = curr_energy->uvalue -
+ prev_energy->uvalue;
+ else
+ energy_consumed = (U32_MAX - prev_energy->uvalue) +
+ curr_energy->uvalue + 1u;
+
+ energy_consumed_in_uJ =
+ peci_pcs_xn_to_uunits(energy_consumed, unit);
+ *acc_energy_in_uJ = S32_MAX &
+ (*acc_energy_in_uJ + (u32)energy_consumed_in_uJ);
+
+ dev_dbg(dev, "raw energy %u, scaled energy %llumJ, cumulative energy %dmJ\n",
+ energy_consumed, energy_consumed_in_uJ,
+ *acc_energy_in_uJ);
+
+ ret = 0;
+ } else {
+ dev_dbg(dev, "skipping calculate cumulative energy, try again\n");
+
+ *acc_energy_in_uJ = 0;
+ ret = -EAGAIN;
+ }
+
+ prev_energy->uvalue = curr_energy->uvalue;
+ peci_sensor_mark_updated_with_time(prev_energy,
+ curr_energy->last_updated);
+
+ return ret;
+}
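+
+/*
+ * Illustrative calculation (example numbers only): with unit = 14, a raw
+ * counter delta of 16384 adds 1000000 uJ (1 J) to the cumulative counter,
+ * which is kept within S32_MAX by the masking above.
+ */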
+
+/**
+ * peci_pcs_get_units - read units (power, energy, time) from HW or cache
+ * @peci_mgr: PECI client manager handle
+ * @units: Pointer to the variable the units read from HW are stored in
+ * @valid: Flag telling whether the cached value is valid
+ *
+ * Return: 0 if succeeded,
+ * -EINVAL if there are null pointers among arguments,
+ * other values in case of other errors.
+ */
+static inline int peci_pcs_get_units(struct peci_client_manager *peci_mgr,
+ union peci_pkg_power_sku_unit *units,
+ bool *valid)
+{
+ int ret = 0;
+
+ if (!valid)
+ return -EINVAL;
+
+ if (!(*valid)) {
+ ret = peci_pcs_read(peci_mgr, PECI_MBX_INDEX_TDP_UNITS,
+ PECI_PCS_PARAM_ZERO, &units->value);
+ if (!ret)
+ *valid = true;
+ }
+ return ret;
+}
+
+/**
+ * peci_pcs_calc_plxy_time_window - calculate the power limit time window in
+ * PCS format. To find that value the following equation has to be solved:
+ * time_window = (1 + (x / 4)) * (2 ^ y), where time_window is a known value
+ * and x and y are the variables to find.
+ * The return value is the composition of x and y as follows:
+ * x = ret[6:5], y = ret[4:0].
+ * @pl_tim_wnd_in_xn: PPL time window in x.n format
+ *
+ * Return: Power limit time window value
+ */
+static inline u32 peci_pcs_calc_plxy_time_window(u32 pl_tim_wnd_in_xn)
+{
+ u32 x = 0u;
+ u32 y = 0u;
+
+ /* Calculate y first */
+ while (pl_tim_wnd_in_xn > 7u) {
+ pl_tim_wnd_in_xn >>= 1;
+ y++;
+ }
+
+ /* Correct y value */
+ if (pl_tim_wnd_in_xn >= 4u)
+ y += 2u;
+ else if (pl_tim_wnd_in_xn >= 2u)
+ y += 1u;
+
+ /* Calculate x then */
+ if (pl_tim_wnd_in_xn >= 4u)
+ x = pl_tim_wnd_in_xn % 4;
+ else
+ x = 0u;
+
+ return ((x & 0x3) << 5) | (y & 0x1F);
+}
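+
+/*
+ * Illustrative encoding (example numbers only): for pl_tim_wnd_in_xn = 28
+ * the loop yields y = 4 and x = 3, so the function returns
+ * (3 << 5) | 4 = 0x64, and indeed (1 + 3/4) * 2^4 = 28.
+ */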
+
+#endif /* __PECI_HWMON_H */
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index b56bd8542864..960cc47f4e93 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -321,6 +321,15 @@ config SENSORS_Q54SJ108A2
This driver can also be built as a module. If so, the module will
be called q54sj108a2.
+config SENSORS_RAA229126
+ tristate "Renesas RAA229126"
+ help
+ If you say yes here you get hardware monitoring support for Renesas
+ RAA229126.
+
+ This driver can also be built as a module. If so, the module will
+ be called raa229126.
+
config SENSORS_STPDDC60
tristate "ST STPDDC60"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 61cdc24b1309..09c73316c0bc 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
+obj-$(CONFIG_SENSORS_RAA229126) += raa229126.o
obj-$(CONFIG_SENSORS_STPDDC60) += stpddc60.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index d0d386990af5..ce2f020f09d7 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -29,6 +29,7 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
struct pmbus_driver_info *info)
{
int page;
+ int fan_mode;
/* Sensors detected on page 0 only */
if (pmbus_check_word_register(client, 0, PMBUS_READ_VIN))
@@ -47,13 +48,20 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
info->func[0] |= PMBUS_HAVE_FAN12;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
+ fan_mode = pmbus_read_byte_data(client, 0, PMBUS_FAN_CONFIG_12);
+ if ((fan_mode & (PB_FAN_1_RPM | PB_FAN_2_RPM)) != (PB_FAN_1_RPM | PB_FAN_2_RPM))
+ info->func[0] |= PMBUS_HAVE_PWM12;
}
if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
info->func[0] |= PMBUS_HAVE_FAN34;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
+ fan_mode = pmbus_read_byte_data(client, 0, PMBUS_FAN_CONFIG_34);
+ if ((fan_mode & (PB_FAN_1_RPM | PB_FAN_2_RPM)) != (PB_FAN_1_RPM | PB_FAN_2_RPM))
+ info->func[0] |= PMBUS_HAVE_PWM34;
}
+
if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1))
info->func[0] |= PMBUS_HAVE_TEMP;
if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index ef3a8ecde4df..75aa97b1ecc0 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -331,6 +331,7 @@ enum pmbus_fan_mode { percent = 0, rpm };
#define PB_PIN_OP_WARNING BIT(0)
#define PB_IIN_OC_WARNING BIT(1)
#define PB_IIN_OC_FAULT BIT(2)
+#define PB_UNIT_OFF_FOR_INSUF_VIN BIT(3)
/*
* STATUS_TEMPERATURE
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f495f0e79392..d462f732f3b4 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -241,12 +241,32 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_NS_GPL(pmbus_write_word_data, PMBUS);
+static int pmbus_update_fan_config(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask)
+{
+ int from;
+ int rv;
+ u8 to;
+
+ from = pmbus_read_byte_data(client, page, pmbus_fan_config_registers[id]);
+ if (from < 0)
+ return from;
+
+ to = (from & ~mask) | (config & mask);
+ if (to != from) {
+ rv = pmbus_write_byte_data(client, page, pmbus_fan_config_registers[id], to);
+ if (rv < 0)
+ return rv;
+ }
+ return 0;
+}
static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
u16 word)
{
int bit;
int id;
+ int config;
int rv;
switch (reg) {
@@ -255,6 +275,18 @@ static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
bit = pmbus_fan_rpm_mask[id];
rv = pmbus_update_fan(client, page, id, bit, bit, word);
break;
+ case PMBUS_VIRT_PWM_1 ... PMBUS_VIRT_PWM_4:
+ id = reg - PMBUS_VIRT_PWM_1;
+ bit = pmbus_fan_rpm_mask[id];
+ /* 0 is pwm mode */
+ rv = pmbus_update_fan(client, page, id, 0, bit, word);
+ break;
+ case PMBUS_VIRT_PWM_ENABLE_1 ... PMBUS_VIRT_PWM_ENABLE_4:
+ id = reg - PMBUS_VIRT_PWM_ENABLE_1;
+ bit = pmbus_fan_rpm_mask[id];
+ config = word ? bit : 0;
+ rv = pmbus_update_fan_config(client, page, id, config, bit);
+ break;
default:
rv = -ENXIO;
break;
@@ -293,8 +325,7 @@ int pmbus_update_fan(struct i2c_client *client, int page, int id,
int rv;
u8 to;
- from = pmbus_read_byte_data(client, page,
- pmbus_fan_config_registers[id]);
+ from = pmbus_read_byte_data(client, page, pmbus_fan_config_registers[id]);
if (from < 0)
return from;
@@ -323,16 +354,39 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
}
EXPORT_SYMBOL_NS_GPL(pmbus_read_word_data, PMBUS);
+static int pmbus_get_fan_config(struct i2c_client *client, int page, int id, u8 mask)
+{
+ int from;
+
+ from = pmbus_read_byte_data(client, page, pmbus_fan_config_registers[id]);
+ if (from < 0)
+ return from;
+
+ return from & mask;
+}
+
static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
{
int rv;
int id;
+ int bit;
switch (reg) {
case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
id = reg - PMBUS_VIRT_FAN_TARGET_1;
rv = pmbus_get_fan_rate_device(client, page, id, rpm);
break;
+ case PMBUS_VIRT_PWM_1 ... PMBUS_VIRT_PWM_4:
+ id = reg - PMBUS_VIRT_PWM_1;
+ rv = pmbus_get_fan_rate_device(client, page, id, percent);
+ break;
+ case PMBUS_VIRT_PWM_ENABLE_1 ... PMBUS_VIRT_PWM_ENABLE_4:
+ id = reg - PMBUS_VIRT_PWM_ENABLE_1;
+ bit = pmbus_fan_rpm_mask[id];
+ rv = pmbus_get_fan_config(client, page, id, bit);
+ if (rv >= 0)
+ rv = !rv; /* PWM is enabled when rpm bit is not set (rv = 0) */
+ break;
default:
rv = -ENXIO;
break;
@@ -1198,6 +1252,8 @@ struct pmbus_limit_attr {
struct pmbus_sensor_attr {
u16 reg; /* sensor register */
u16 gbit; /* generic status bit */
+ u16 gfbit; /* generic fault status bit */
+ u16 sbbit; /* beep status bit */
u8 nlimit; /* # of limit registers */
enum pmbus_sensor_classes class;/* sensor class */
const char *label; /* sensor label */
@@ -1300,6 +1356,32 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return ret;
}
}
+ /*
+ * Add fault attribute if there is a generic fault bit, and if
+ * the generic status register (word or byte, depending on which global
+ * bit is set) for this page is accessible.
+ */
+ if (attr->gfbit) {
+ upper = !!(attr->gfbit & 0xff00); /* need to check STATUS_WORD */
+ if ((!upper || (upper && data->has_status_word)) &&
+ pmbus_check_status_register(client, page)) {
+ ret = pmbus_add_boolean(data, name, "fault", index,
+ NULL, NULL,
+ page, PMBUS_STATUS_WORD,
+ attr->gfbit);
+ if (ret)
+ return ret;
+ }
+ }
+ /* Add beep attribute if there is a beep status bit. */
+ if (attr->sbbit) {
+ ret = pmbus_add_boolean(data, name, "beep", index,
+ NULL, NULL,
+ page, attr->sreg,
+ attr->sbbit);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1498,6 +1580,8 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
.gbit = PB_STATUS_VIN_UV,
.limit = vin_limit_attrs,
.nlimit = ARRAY_SIZE(vin_limit_attrs),
+ .gfbit = PB_STATUS_WORD_MFR,
+ .sbbit = PB_UNIT_OFF_FOR_INSUF_VIN,
}, {
.reg = PMBUS_VIRT_READ_VMON,
.class = PSC_VOLTAGE_IN,
diff --git a/drivers/hwmon/pmbus/raa229126.c b/drivers/hwmon/pmbus/raa229126.c
new file mode 100644
index 000000000000..cd2de3ceb5a2
--- /dev/null
+++ b/drivers/hwmon/pmbus/raa229126.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for RAA229126
+ *
+ * Copyright (c) 2022 Intel Corporation.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define RAA229126_NUM_PAGES 2
+
+static struct pmbus_driver_info raa229126_info = {
+ .pages = RAA229126_NUM_PAGES,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_POWER] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .m[PSC_POWER] = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP,
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP,
+};
+
+static int raa229126_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_WORD_DATA |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+ return -ENODEV;
+
+ /* Read the device ID */
+ ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read PMBUS_IC_DEVICE_ID\n");
+ return ret;
+ }
+ if (ret != 4 || strncmp(buf, "\x00\x82\xd2\x49", 4)) {
+ dev_err(&client->dev, "DEVICE_ID unrecognized\n");
+ return -ENODEV;
+ }
+
+ info = devm_kmemdup(&client->dev, &raa229126_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id raa229126_id[] = {
+ {"raa229126", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, raa229126_id);
+
+static struct i2c_driver raa229126_driver = {
+ .driver = {
+ .name = "raa229126",
+ },
+ .probe_new = raa229126_probe,
+ .id_table = raa229126_id,
+};
+
+module_i2c_driver(raa229126_driver);
+
+MODULE_AUTHOR("Zhikui Ren <zhikui.ren@intel.com>");
+MODULE_DESCRIPTION("PMBus driver for Renesas RAA229126");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 438905e2a1d0..339464db1df6 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -133,6 +133,29 @@ config I2C_SLAVE_TESTUNIT
multi-master, SMBus Host Notify, etc. Please read
Documentation/i2c/slave-testunit-backend.rst for further details.
+config I2C_SLAVE_MQUEUE_MESSAGE_SIZE
+ int "The message size of I2C mqueue slave"
+ default 120
+
+config I2C_SLAVE_MQUEUE_QUEUE_SIZE
+ int "The queue size of I2C mqueue slave"
+ default 32
+ help
+ This number MUST be a power of 2.
+
+config I2C_SLAVE_MQUEUE
+ tristate "I2C mqueue (message queue) slave driver"
+ help
+ Some protocols over I2C are designed to transfer messages in both
+ directions using only the I2C Master Write protocol. This driver
+ receives and queues messages from the remote I2C device.
+
+ Userspace can get the messages by reading the sysfs file that this
+ driver exposes.
+
+ This support is also available as a module. If so, the module will be
+ called i2c-slave-mqueue.
+
endif
config I2C_DEBUG_CORE
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index c1d493dc9bac..0442e5cf8587 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -17,5 +17,6 @@ obj-y += algos/ busses/ muxes/
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_I2C_SLAVE_EEPROM) += i2c-slave-eeprom.o
obj-$(CONFIG_I2C_SLAVE_TESTUNIT) += i2c-slave-testunit.o
+obj-$(CONFIG_I2C_SLAVE_MQUEUE) += i2c-slave-mqueue.o
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 67e8b97c0c95..d2cbfa7ce5c9 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -7,8 +7,11 @@
* Copyright 2017 Google, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -19,15 +22,24 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
-/* I2C Register */
+/* I2C Global Registers */
+/* 0x00 : I2CG Interrupt Status Register */
+/* 0x08 : I2CG Interrupt Target Assignment */
+/* 0x0c : I2CG Global Control Register (AST2500) */
+#define ASPEED_I2CG_GLOBAL_CTRL_REG 0x0c
+#define ASPEED_I2CG_SRAM_BUFFER_EN BIT(0)
+
+/* I2C Bus Registers */
#define ASPEED_I2C_FUN_CTRL_REG 0x00
#define ASPEED_I2C_AC_TIMING_REG1 0x04
#define ASPEED_I2C_AC_TIMING_REG2 0x08
@@ -35,18 +47,20 @@
#define ASPEED_I2C_INTR_STS_REG 0x10
#define ASPEED_I2C_CMD_REG 0x14
#define ASPEED_I2C_DEV_ADDR_REG 0x18
+#define ASPEED_I2C_BUF_CTRL_REG 0x1c
#define ASPEED_I2C_BYTE_BUF_REG 0x20
-
-/* Global Register Definition */
-/* 0x00 : I2C Interrupt Status Register */
-/* 0x08 : I2C Interrupt Target Assignment */
+#define ASPEED_I2C_DMA_ADDR_REG 0x24
+#define ASPEED_I2C_DMA_LEN_REG 0x28
/* Device Register Definition */
/* 0x00 : I2CD Function Control Register */
+#define ASPEED_I2CD_BUFFER_PAGE_SEL_MASK GENMASK(22, 20)
+#define ASPEED_I2CD_BUS_AUTO_RECOVERY_EN BIT(17)
#define ASPEED_I2CD_MULTI_MASTER_DIS BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN BIT(6)
+#define ASPEED_I2CD_GCALL_EN BIT(2)
#define ASPEED_I2CD_SLAVE_EN BIT(1)
#define ASPEED_I2CD_MASTER_EN BIT(0)
@@ -58,10 +72,14 @@
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT 12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK GENMASK(15, 12)
+#define ASPEED_I2CD_TIME_TIMEOUT_BASE_DIVISOR_SHIFT 8
+#define ASPEED_I2CD_TIME_TIMEOUT_BASE_DIVISOR_MASK GENMASK(9, 8)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX GENMASK(3, 0)
+
/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
-#define ASPEED_NO_TIMEOUT_CTRL 0
+#define ASPEED_I2CD_TIMEOUT_CYCLES_SHIFT 0
+#define ASPEED_I2CD_TIMEOUT_CYCLES_MASK GENMASK(4, 0)
/* 0x0c : I2CD Interrupt Control Register &
* 0x10 : I2CD Interrupt Status Register
@@ -70,8 +88,15 @@
* status bits.
*/
#define ASPEED_I2CD_INTR_RECV_MASK 0xf000ffff
+#if defined(CONFIG_MACH_ASPEED_G6)
+#define ASPEED_I2CD_INTR_SLAVE_ADDR_RECEIVED_PENDING BIT(29)
+#else
+#define ASPEED_I2CD_INTR_SLAVE_ADDR_RECEIVED_PENDING BIT(30)
+#endif
+#define ASPEED_I2CD_INTR_SLAVE_INACTIVE_TIMEOUT BIT(15)
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13)
+#define ASPEED_I2CD_INTR_GCALL_ADDR BIT(8)
#define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL BIT(5)
@@ -85,8 +110,11 @@
ASPEED_I2CD_INTR_SCL_TIMEOUT | \
ASPEED_I2CD_INTR_ABNORMAL | \
ASPEED_I2CD_INTR_ARBIT_LOSS)
+#define ASPEED_I2CD_INTR_SLAVE_ERRORS \
+ ASPEED_I2CD_INTR_SLAVE_INACTIVE_TIMEOUT
#define ASPEED_I2CD_INTR_ALL \
- (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
+ (ASPEED_I2CD_INTR_SLAVE_INACTIVE_TIMEOUT | \
+ ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \
ASPEED_I2CD_INTR_SCL_TIMEOUT | \
ASPEED_I2CD_INTR_ABNORMAL | \
@@ -95,6 +123,11 @@
ASPEED_I2CD_INTR_RX_DONE | \
ASPEED_I2CD_INTR_TX_NAK | \
ASPEED_I2CD_INTR_TX_ACK)
+#define ASPEED_I2CD_INTR_STATUS_MASK \
+ (ASPEED_I2CD_INTR_SLAVE_ADDR_RECEIVED_PENDING | \
+ ASPEED_I2CD_INTR_GCALL_ADDR | \
+ ASPEED_I2CD_INTR_SLAVE_MATCH | \
+ ASPEED_I2CD_INTR_ALL)
/* 0x14 : I2CD Command/Status Register */
#define ASPEED_I2CD_SCL_LINE_STS BIT(18)
@@ -103,6 +136,10 @@
#define ASPEED_I2CD_BUS_RECOVER_CMD BIT(11)
/* Command Bit */
+#define ASPEED_I2CD_RX_DMA_ENABLE BIT(9)
+#define ASPEED_I2CD_TX_DMA_ENABLE BIT(8)
+#define ASPEED_I2CD_RX_BUFF_ENABLE BIT(7)
+#define ASPEED_I2CD_TX_BUFF_ENABLE BIT(6)
#define ASPEED_I2CD_M_STOP_CMD BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST BIT(4)
#define ASPEED_I2CD_M_RX_CMD BIT(3)
@@ -119,6 +156,21 @@
/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0)
+/* 0x1c : I2CD Buffer Control Register */
+/* Use 8-bit or 6-bit wide bit fields to support both AST2400 and AST2500 */
+#define ASPEED_I2CD_BUF_RX_COUNT_MASK GENMASK(31, 24)
+#define ASPEED_I2CD_BUF_RX_SIZE_MASK GENMASK(23, 16)
+#define ASPEED_I2CD_BUF_TX_COUNT_MASK GENMASK(15, 8)
+#define ASPEED_I2CD_BUF_OFFSET_MASK GENMASK(5, 0)
+
+/* 0x24 : I2CD DMA Mode Buffer Address Register */
+#define ASPEED_I2CD_DMA_ADDR_MASK GENMASK(31, 2)
+#define ASPEED_I2CD_DMA_ALIGN 4
+
+/* 0x28 : I2CD DMA Transfer Length Register */
+#define ASPEED_I2CD_DMA_LEN_SHIFT 0
+#define ASPEED_I2CD_DMA_LEN_MASK GENMASK(11, 0)
+
enum aspeed_i2c_master_state {
ASPEED_I2C_MASTER_INACTIVE,
ASPEED_I2C_MASTER_PENDING,
@@ -137,6 +189,8 @@ enum aspeed_i2c_slave_state {
ASPEED_I2C_SLAVE_READ_PROCESSED,
ASPEED_I2C_SLAVE_WRITE_REQUESTED,
ASPEED_I2C_SLAVE_WRITE_RECEIVED,
+ ASPEED_I2C_SLAVE_GCALL_START,
+ ASPEED_I2C_SLAVE_GCALL_REQUESTED,
ASPEED_I2C_SLAVE_STOP,
};
@@ -152,6 +206,7 @@ struct aspeed_i2c_bus {
u32 divisor);
unsigned long parent_clk_frequency;
u32 bus_frequency;
+ u32 hw_timeout_ms;
/* Transaction state. */
enum aspeed_i2c_master_state master_state;
struct i2c_msg *msgs;
@@ -164,12 +219,43 @@ struct aspeed_i2c_bus {
int master_xfer_result;
/* Multi-master */
bool multi_master;
+ /* Buffer mode */
+ void __iomem *buf_base;
+ u8 buf_offset;
+ u8 buf_page;
+ /* DMA mode */
+ struct dma_pool *dma_pool;
+ dma_addr_t dma_handle;
+ u8 *dma_buf;
+ size_t dma_len;
+ /* Buffer/DMA mode */
+ size_t buf_size;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
struct i2c_client *slave;
enum aspeed_i2c_slave_state slave_state;
+ /* General call */
+ bool general_call;
#endif /* CONFIG_I2C_SLAVE */
};
+static bool dump_debug __read_mostly;
+static int dump_debug_bus_id __read_mostly;
+
+#define I2C_HEX_DUMP(bus, addr, flags, buf, len) \
+ do { \
+ if (dump_debug && (bus)->adap.nr == dump_debug_bus_id) { \
+ char dump_info[100] = {0,}; \
+ char task_info[TASK_COMM_LEN]; \
+ get_task_comm(task_info, current); \
+ snprintf(dump_info, sizeof(dump_info), \
+ "bus_id:%d, addr:0x%02x, flags:0x%02x, task:%s(%d): ", \
+ (bus)->adap.nr, addr, flags, task_info, \
+ task_pid_nr(current)); \
+ print_hex_dump(KERN_ERR, dump_info, DUMP_PREFIX_NONE, \
+ 16, 1, buf, len, true); \
+ } \
+ } while (0)
+
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
@@ -241,6 +327,123 @@ reset_out:
}
#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static int aspeed_i2c_check_slave_error(u32 irq_status)
+{
+ if (irq_status & ASPEED_I2CD_INTR_SLAVE_INACTIVE_TIMEOUT)
+ return -EIO;
+
+ return 0;
+}
+
+static inline void
+aspeed_i2c_slave_handle_rx_done(struct aspeed_i2c_bus *bus, u32 irq_status,
+ u8 *value)
+{
+ if (bus->dma_buf &&
+ bus->slave_state == ASPEED_I2C_SLAVE_WRITE_RECEIVED &&
+ !(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))
+ *value = bus->dma_buf[0];
+ else if (bus->buf_base &&
+ bus->slave_state == ASPEED_I2C_SLAVE_WRITE_RECEIVED &&
+ !(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))
+ *value = readb(bus->buf_base);
+ else
+ *value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
+}
+
+static inline void
+aspeed_i2c_slave_handle_normal_stop(struct aspeed_i2c_bus *bus, u32 irq_status,
+ u8 *value)
+{
+ int i, len;
+
+ if (bus->slave_state == ASPEED_I2C_SLAVE_WRITE_RECEIVED &&
+ irq_status & ASPEED_I2CD_INTR_RX_DONE) {
+ if (bus->dma_buf) {
+ len = bus->buf_size -
+ FIELD_GET(ASPEED_I2CD_DMA_LEN_MASK,
+ readl(bus->base +
+ ASPEED_I2C_DMA_LEN_REG));
+ for (i = 0; i < len; i++) {
+ *value = bus->dma_buf[i];
+ i2c_slave_event(bus->slave,
+ I2C_SLAVE_WRITE_RECEIVED,
+ value);
+ }
+ } else if (bus->buf_base) {
+ len = FIELD_GET(ASPEED_I2CD_BUF_RX_COUNT_MASK,
+ readl(bus->base +
+ ASPEED_I2C_BUF_CTRL_REG));
+ for (i = 0; i < len; i++) {
+ *value = readb(bus->buf_base + i);
+ i2c_slave_event(bus->slave,
+ I2C_SLAVE_WRITE_RECEIVED,
+ value);
+ }
+ }
+ }
+}
+
+static inline void
+aspeed_i2c_slave_handle_write_requested(struct aspeed_i2c_bus *bus, u8 *value)
+{
+ if (bus->dma_buf) {
+ writel(bus->dma_handle & ASPEED_I2CD_DMA_ADDR_MASK,
+ bus->base + ASPEED_I2C_DMA_ADDR_REG);
+ writel(FIELD_PREP(ASPEED_I2CD_DMA_LEN_MASK, bus->buf_size),
+ bus->base + ASPEED_I2C_DMA_LEN_REG);
+ writel(ASPEED_I2CD_RX_DMA_ENABLE,
+ bus->base + ASPEED_I2C_CMD_REG);
+ } else if (bus->buf_base) {
+ writel(FIELD_PREP(ASPEED_I2CD_BUF_RX_SIZE_MASK,
+ bus->buf_size - 1) |
+ FIELD_PREP(ASPEED_I2CD_BUF_OFFSET_MASK,
+ bus->buf_offset),
+ bus->base + ASPEED_I2C_BUF_CTRL_REG);
+ writel(ASPEED_I2CD_RX_BUFF_ENABLE,
+ bus->base + ASPEED_I2C_CMD_REG);
+ }
+}
+
+static inline void
+aspeed_i2c_slave_handle_write_received(struct aspeed_i2c_bus *bus, u8 *value)
+{
+ int i, len;
+
+ if (bus->dma_buf) {
+ len = bus->buf_size -
+ FIELD_GET(ASPEED_I2CD_DMA_LEN_MASK,
+ readl(bus->base +
+ ASPEED_I2C_DMA_LEN_REG));
+ for (i = 1; i < len; i++) {
+ *value = bus->dma_buf[i];
+ i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_RECEIVED,
+ value);
+ }
+ writel(bus->dma_handle & ASPEED_I2CD_DMA_ADDR_MASK,
+ bus->base + ASPEED_I2C_DMA_ADDR_REG);
+ writel(FIELD_PREP(ASPEED_I2CD_DMA_LEN_MASK, bus->buf_size),
+ bus->base + ASPEED_I2C_DMA_LEN_REG);
+ writel(ASPEED_I2CD_RX_DMA_ENABLE,
+ bus->base + ASPEED_I2C_CMD_REG);
+ } else if (bus->buf_base) {
+ len = FIELD_GET(ASPEED_I2CD_BUF_RX_COUNT_MASK,
+ readl(bus->base +
+ ASPEED_I2C_BUF_CTRL_REG));
+ for (i = 1; i < len; i++) {
+ *value = readb(bus->buf_base + i);
+ i2c_slave_event(bus->slave, I2C_SLAVE_WRITE_RECEIVED,
+ value);
+ }
+ writel(FIELD_PREP(ASPEED_I2CD_BUF_RX_SIZE_MASK,
+ bus->buf_size - 1) |
+ FIELD_PREP(ASPEED_I2CD_BUF_OFFSET_MASK, bus->buf_offset),
+ bus->base + ASPEED_I2C_BUF_CTRL_REG);
+ writel(ASPEED_I2CD_RX_BUFF_ENABLE,
+ bus->base + ASPEED_I2C_CMD_REG);
+ }
+}
+
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
u32 command, irq_handled = 0;
@@ -250,6 +453,14 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
if (!slave)
return 0;
+ if (aspeed_i2c_check_slave_error(irq_status)) {
+ dev_dbg(bus->dev, "received slave error interrupt: 0x%08x\n",
+ irq_status);
+ irq_handled |= (irq_status & ASPEED_I2CD_INTR_SLAVE_ERRORS);
+ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
+ return irq_handled;
+ }
+
command = readl(bus->base + ASPEED_I2C_CMD_REG);
/* Slave was requested, restart state machine. */
@@ -258,6 +469,12 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
bus->slave_state = ASPEED_I2C_SLAVE_START;
}
+ /* General call was requested, restart state machine. */
+ if (irq_status & ASPEED_I2CD_INTR_GCALL_ADDR) {
+ irq_handled |= ASPEED_I2CD_INTR_GCALL_ADDR;
+ bus->slave_state = ASPEED_I2C_SLAVE_GCALL_START;
+ }
+
/* Slave is not currently active, irq was for someone else. */
if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
return irq_handled;
@@ -265,9 +482,21 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
irq_status, command);
+ /*
+ * If a peer master sends messages too quickly, before the previous
+ * slave DMA data has been processed, this indicator will be set. It's
+ * just an indicator and the driver can't recover from this case, so
+ * just ignore it.
+ */
+ if (unlikely(irq_status &
+ ASPEED_I2CD_INTR_SLAVE_ADDR_RECEIVED_PENDING)) {
+ dev_dbg(bus->dev, "A slave addr match interrupt is pending.\n");
+ irq_handled |= ASPEED_I2CD_INTR_SLAVE_ADDR_RECEIVED_PENDING;
+ }
+
/* Slave was sent something. */
if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
- value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
+ aspeed_i2c_slave_handle_rx_done(bus, irq_status, &value);
/* Handle address frame. */
if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
if (value & 0x1)
@@ -276,15 +505,32 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
else
bus->slave_state =
ASPEED_I2C_SLAVE_WRITE_REQUESTED;
+ } else if (bus->slave_state == ASPEED_I2C_SLAVE_GCALL_START) {
+ /*
+ * The I2C spec defines the meaning of the second byte as below:
+ * 0x06 : Reset and write programmable part of slave
+ * address by hardware.
+ * 0x04 : Write programmable part of slave address by
+ * hardware.
+ * 0x00 : Not allowed.
+ *
+ * But in OpenBMC this 'General call' feature is used for
+ * IPMB message broadcasting, so all data is delivered as is
+ * without any specific handling of the second byte.
+ */
+ bus->slave_state = ASPEED_I2C_SLAVE_GCALL_REQUESTED;
}
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
}
/* Slave was asked to stop. */
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
+ aspeed_i2c_slave_handle_normal_stop(bus, irq_status, &value);
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
+
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
@@ -314,15 +560,22 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ aspeed_i2c_slave_handle_write_requested(bus, &value);
break;
case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ aspeed_i2c_slave_handle_write_received(bus, &value);
+ break;
+ case ASPEED_I2C_SLAVE_GCALL_REQUESTED:
+ bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
+ i2c_slave_event(slave, I2C_SLAVE_GCALL_REQUESTED, &value);
break;
case ASPEED_I2C_SLAVE_STOP:
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
break;
case ASPEED_I2C_SLAVE_START:
+ case ASPEED_I2C_SLAVE_GCALL_START:
/* Slave was just started. Waiting for the next event. */;
break;
default:
@@ -336,12 +589,95 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
}
#endif /* CONFIG_I2C_SLAVE */
+static inline u32
+aspeed_i2c_prepare_rx_buf(struct aspeed_i2c_bus *bus, struct i2c_msg *msg)
+{
+ u32 command = 0;
+ int len;
+
+ if (msg->len > bus->buf_size) {
+ len = bus->buf_size;
+ } else {
+ len = msg->len;
+ command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
+ }
+
+ if (bus->dma_buf) {
+ command |= ASPEED_I2CD_RX_DMA_ENABLE;
+
+ writel(bus->dma_handle & ASPEED_I2CD_DMA_ADDR_MASK,
+ bus->base + ASPEED_I2C_DMA_ADDR_REG);
+ writel(FIELD_PREP(ASPEED_I2CD_DMA_LEN_MASK, len),
+ bus->base + ASPEED_I2C_DMA_LEN_REG);
+ bus->dma_len = len;
+ } else {
+ command |= ASPEED_I2CD_RX_BUFF_ENABLE;
+
+ writel(FIELD_PREP(ASPEED_I2CD_BUF_RX_SIZE_MASK, len - 1) |
+ FIELD_PREP(ASPEED_I2CD_BUF_OFFSET_MASK, bus->buf_offset),
+ bus->base + ASPEED_I2C_BUF_CTRL_REG);
+ }
+
+ return command;
+}
+
+static inline u32
+aspeed_i2c_prepare_tx_buf(struct aspeed_i2c_bus *bus, struct i2c_msg *msg)
+{
+ u8 slave_addr = i2c_8bit_addr_from_msg(msg);
+ u32 command = 0;
+ int len;
+
+ if (msg->len + 1 > bus->buf_size)
+ len = bus->buf_size;
+ else
+ len = msg->len + 1;
+
+ if (bus->dma_buf) {
+ command |= ASPEED_I2CD_TX_DMA_ENABLE;
+
+ bus->dma_buf[0] = slave_addr;
+ memcpy(bus->dma_buf + 1, msg->buf, len);
+
+ writel(bus->dma_handle & ASPEED_I2CD_DMA_ADDR_MASK,
+ bus->base + ASPEED_I2C_DMA_ADDR_REG);
+ writel(FIELD_PREP(ASPEED_I2CD_DMA_LEN_MASK, len),
+ bus->base + ASPEED_I2C_DMA_LEN_REG);
+ bus->dma_len = len;
+ } else {
+ u8 wbuf[4];
+ int i;
+
+ command |= ASPEED_I2CD_TX_BUFF_ENABLE;
+
+ /*
+ * It looks awkward, but byte writes to the remapped I2C SRAM cause
+ * corruption, so assemble the data and use dword writes instead.
+ */
+ wbuf[0] = slave_addr;
+ for (i = 1; i < len; i++) {
+ wbuf[i % 4] = msg->buf[i - 1];
+ if (i % 4 == 3)
+ writel(*(u32 *)wbuf, bus->buf_base + i - 3);
+ }
+ if (--i % 4 != 3)
+ writel(*(u32 *)wbuf, bus->buf_base + i - (i % 4));
+
+ writel(FIELD_PREP(ASPEED_I2CD_BUF_TX_COUNT_MASK, len - 1) |
+ FIELD_PREP(ASPEED_I2CD_BUF_OFFSET_MASK, bus->buf_offset),
+ bus->base + ASPEED_I2C_BUF_CTRL_REG);
+ }
+
+ bus->buf_index = len - 1;
+
+ return command;
+}
+
/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
- u8 slave_addr = i2c_8bit_addr_from_msg(msg);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/*
@@ -360,12 +696,22 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
if (msg->flags & I2C_M_RD) {
command |= ASPEED_I2CD_M_RX_CMD;
- /* Need to let the hardware know to NACK after RX. */
- if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
- command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
+ if (!(msg->flags & I2C_M_RECV_LEN)) {
+ if (msg->len && (bus->dma_buf || bus->buf_base))
+ command |= aspeed_i2c_prepare_rx_buf(bus, msg);
+
+ /* Need to let the hardware know to NACK after RX. */
+ if (msg->len <= 1)
+ command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
+ }
+ } else if (msg->len && (bus->dma_buf || bus->buf_base)) {
+ command |= aspeed_i2c_prepare_tx_buf(bus, msg);
}
- writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
+ if (!(command & (ASPEED_I2CD_TX_BUFF_ENABLE |
+ ASPEED_I2CD_TX_DMA_ENABLE)))
+ writel(i2c_8bit_addr_from_msg(msg),
+ bus->base + ASPEED_I2C_BYTE_BUF_REG);
writel(command, bus->base + ASPEED_I2C_CMD_REG);
}
@@ -387,7 +733,7 @@ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
}
}
-static int aspeed_i2c_is_irq_error(u32 irq_status)
+static int aspeed_i2c_check_master_error(u32 irq_status)
{
if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
return -EAGAIN;
@@ -400,6 +746,134 @@ static int aspeed_i2c_is_irq_error(u32 irq_status)
return 0;
}
+static inline u32
+aspeed_i2c_master_handle_tx_first(struct aspeed_i2c_bus *bus,
+ struct i2c_msg *msg)
+{
+ u32 command = 0;
+
+ if (bus->dma_buf || bus->buf_base) {
+ int len;
+
+ if (msg->len - bus->buf_index > bus->buf_size)
+ len = bus->buf_size;
+ else
+ len = msg->len - bus->buf_index;
+
+ if (bus->dma_buf) {
+ command |= ASPEED_I2CD_TX_DMA_ENABLE;
+
+ memcpy(bus->dma_buf, msg->buf + bus->buf_index, len);
+
+ writel(bus->dma_handle & ASPEED_I2CD_DMA_ADDR_MASK,
+ bus->base + ASPEED_I2C_DMA_ADDR_REG);
+ writel(FIELD_PREP(ASPEED_I2CD_DMA_LEN_MASK, len),
+ bus->base + ASPEED_I2C_DMA_LEN_REG);
+ bus->dma_len = len;
+ } else {
+ u8 wbuf[4];
+ int i;
+
+ command |= ASPEED_I2CD_TX_BUFF_ENABLE;
+
+ if (msg->len - bus->buf_index > bus->buf_size)
+ len = bus->buf_size;
+ else
+ len = msg->len - bus->buf_index;
+
+ for (i = 0; i < len; i++) {
+ wbuf[i % 4] = msg->buf[bus->buf_index + i];
+ if (i % 4 == 3)
+ writel(*(u32 *)wbuf,
+ bus->buf_base + i - 3);
+ }
+ if (--i % 4 != 3)
+ writel(*(u32 *)wbuf,
+ bus->buf_base + i - (i % 4));
+
+ writel(FIELD_PREP(ASPEED_I2CD_BUF_TX_COUNT_MASK,
+ len - 1) |
+ FIELD_PREP(ASPEED_I2CD_BUF_OFFSET_MASK,
+ bus->buf_offset),
+ bus->base + ASPEED_I2C_BUF_CTRL_REG);
+ }
+
+ bus->buf_index += len;
+ } else {
+ writel(msg->buf[bus->buf_index++],
+ bus->base + ASPEED_I2C_BYTE_BUF_REG);
+ }
+
+ return command;
+}
+
+static inline void
+aspeed_i2c_master_handle_rx(struct aspeed_i2c_bus *bus, struct i2c_msg *msg)
+{
+ u8 recv_byte;
+ int len;
+
+ if (bus->dma_buf) {
+ len = bus->dma_len -
+ FIELD_GET(ASPEED_I2CD_DMA_LEN_MASK,
+ readl(bus->base + ASPEED_I2C_DMA_LEN_REG));
+
+ memcpy(msg->buf + bus->buf_index, bus->dma_buf, len);
+ bus->buf_index += len;
+ } else if (bus->buf_base) {
+ len = FIELD_GET(ASPEED_I2CD_BUF_RX_COUNT_MASK,
+ readl(bus->base + ASPEED_I2C_BUF_CTRL_REG));
+ memcpy_fromio(msg->buf + bus->buf_index, bus->buf_base, len);
+ bus->buf_index += len;
+ } else {
+ recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
+ msg->buf[bus->buf_index++] = recv_byte;
+ }
+}
+
+static inline u32
+aspeed_i2c_master_handle_rx_next(struct aspeed_i2c_bus *bus,
+ struct i2c_msg *msg)
+{
+ u32 command = 0;
+
+ if (bus->dma_buf || bus->buf_base) {
+ int len;
+
+ if (msg->len - bus->buf_index > bus->buf_size) {
+ len = bus->buf_size;
+ } else {
+ len = msg->len - bus->buf_index;
+ command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
+ }
+
+ if (bus->dma_buf) {
+ command |= ASPEED_I2CD_RX_DMA_ENABLE;
+
+ writel(bus->dma_handle & ASPEED_I2CD_DMA_ADDR_MASK,
+ bus->base + ASPEED_I2C_DMA_ADDR_REG);
+ writel(FIELD_PREP(ASPEED_I2CD_DMA_LEN_MASK, len),
+ bus->base + ASPEED_I2C_DMA_LEN_REG);
+ bus->dma_len = len;
+ } else {
+ command |= ASPEED_I2CD_RX_BUFF_ENABLE;
+
+ writel(FIELD_PREP(ASPEED_I2CD_BUF_RX_SIZE_MASK,
+ len - 1) |
+ FIELD_PREP(ASPEED_I2CD_BUF_TX_COUNT_MASK, 0) |
+ FIELD_PREP(ASPEED_I2CD_BUF_OFFSET_MASK,
+ bus->buf_offset),
+ bus->base + ASPEED_I2C_BUF_CTRL_REG);
+ }
+ } else {
+ if (bus->buf_index + 1 == msg->len)
+ command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
+ }
+
+ return command;
+}
+
static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
u32 irq_handled = 0, command = 0;
@@ -418,13 +892,19 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
* should clear the command queue effectively taking us back to the
* INACTIVE state.
*/
- ret = aspeed_i2c_is_irq_error(irq_status);
+ ret = aspeed_i2c_check_master_error(irq_status);
if (ret) {
- dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
+ dev_dbg(bus->dev, "received master error interrupt: 0x%08x\n",
irq_status);
irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
bus->cmd_err = ret;
+ if (bus->master_state == ASPEED_I2C_MASTER_STOP)
+ irq_handled |= (irq_status &
+ ASPEED_I2CD_INTR_NORMAL_STOP);
+ if (ret == -EAGAIN)
+ irq_handled |= (irq_status &
+ ASPEED_I2CD_INTR_TX_ACK);
bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
goto out_complete;
}
@@ -508,11 +988,10 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
fallthrough;
case ASPEED_I2C_MASTER_TX_FIRST:
if (bus->buf_index < msg->len) {
+ command = ASPEED_I2CD_M_TX_CMD;
+ command |= aspeed_i2c_master_handle_tx_first(bus, msg);
+ writel(command, bus->base + ASPEED_I2C_CMD_REG);
bus->master_state = ASPEED_I2C_MASTER_TX;
- writel(msg->buf[bus->buf_index++],
- bus->base + ASPEED_I2C_BYTE_BUF_REG);
- writel(ASPEED_I2CD_M_TX_CMD,
- bus->base + ASPEED_I2C_CMD_REG);
} else {
aspeed_i2c_next_msg_or_stop(bus);
}
@@ -529,26 +1008,34 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
}
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
- recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
- msg->buf[bus->buf_index++] = recv_byte;
-
if (msg->flags & I2C_M_RECV_LEN) {
- if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
+ if (unlikely(bus->buf_index != 0)) {
+ dev_err(bus->dev, "I2C_M_RECV_LEN buf_index is not zero\n");
bus->cmd_err = -EPROTO;
aspeed_i2c_do_stop(bus);
goto out_no_complete;
}
- msg->len = recv_byte +
- ((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
+ recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
+ if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
+ dev_err(bus->dev,
+ "I2C_M_RECV_LEN too big %d, truncate to %d\n",
+ recv_byte, I2C_SMBUS_BLOCK_MAX);
+ recv_byte = I2C_SMBUS_BLOCK_MAX;
+ }
+ msg->len = recv_byte + ((msg->flags & I2C_CLIENT_PEC) ?
+ 2 : 1);
msg->flags &= ~I2C_M_RECV_LEN;
+ msg->buf[0] = recv_byte;
+ bus->buf_index = 1;
+ } else if (msg->len) {
+ aspeed_i2c_master_handle_rx(bus, msg);
}
if (bus->buf_index < msg->len) {
- bus->master_state = ASPEED_I2C_MASTER_RX;
command = ASPEED_I2CD_M_RX_CMD;
- if (bus->buf_index + 1 == msg->len)
- command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
+ command |= aspeed_i2c_master_handle_rx_next(bus, msg);
writel(command, bus->base + ASPEED_I2C_CMD_REG);
+ bus->master_state = ASPEED_I2C_MASTER_RX;
} else {
aspeed_i2c_next_msg_or_stop(bus);
}
@@ -625,9 +1112,14 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
} else {
irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
irq_remaining &= ~irq_handled;
- if (irq_remaining)
+ if (irq_remaining) {
irq_handled |= aspeed_i2c_master_irq(bus,
irq_remaining);
+ if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE &&
+ bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+ irq_handled |= (irq_remaining &
+ ASPEED_I2CD_INTR_NORMAL_STOP);
+ }
}
/*
@@ -662,6 +1154,7 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
{
struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
unsigned long time_left, flags;
+ int i;
spin_lock_irqsave(&bus->lock, flags);
bus->cmd_err = 0;
@@ -713,6 +1206,11 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
return -ETIMEDOUT;
}
+ for (i = 0; i < num; i++) {
+ I2C_HEX_DUMP(bus, msgs[i].addr, msgs[i].flags,
+ msgs[i].buf, msgs[i].len);
+ }
+
return bus->master_xfer_result;
}
@@ -740,6 +1238,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
/* Turn on slave mode. */
func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
+ if (bus->general_call)
+ func_ctrl_reg_val |= ASPEED_I2CD_GCALL_EN;
writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
}
@@ -778,6 +1278,8 @@ static int aspeed_i2c_unreg_slave(struct i2c_client *client)
/* Turn off slave mode. */
func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
+ if (bus->general_call)
+ func_ctrl_reg_val &= ~ASPEED_I2CD_GCALL_EN;
writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
bus->slave = NULL;
@@ -884,6 +1386,7 @@ static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
+ u32 timeout_base_divisor, timeout_tick_us, timeout_cycles;
u32 divisor, clk_reg_val;
divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
@@ -892,8 +1395,46 @@ static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
ASPEED_I2CD_TIME_THDSTA_MASK |
ASPEED_I2CD_TIME_TACST_MASK);
clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
+
+ if (bus->hw_timeout_ms) {
+ u8 div_max = ASPEED_I2CD_TIME_TIMEOUT_BASE_DIVISOR_MASK >>
+ ASPEED_I2CD_TIME_TIMEOUT_BASE_DIVISOR_SHIFT;
+ u8 cycles_max = ASPEED_I2CD_TIMEOUT_CYCLES_MASK >>
+ ASPEED_I2CD_TIMEOUT_CYCLES_SHIFT;
+
+ timeout_base_divisor = 0;
+
+ do {
+ timeout_tick_us = 1000 * (16384 <<
+ (timeout_base_divisor << 1)) /
+ (bus->parent_clk_frequency / 1000);
+
+ if (timeout_base_divisor == div_max ||
+ timeout_tick_us * ASPEED_I2CD_TIMEOUT_CYCLES_MASK >=
+ bus->hw_timeout_ms * 1000)
+ break;
+ } while (timeout_base_divisor++ < div_max);
+
+ if (timeout_tick_us) {
+ timeout_cycles = DIV_ROUND_UP(bus->hw_timeout_ms * 1000,
+ timeout_tick_us);
+ if (timeout_cycles == 0)
+ timeout_cycles = 1;
+ else if (timeout_cycles > cycles_max)
+ timeout_cycles = cycles_max;
+ } else {
+ timeout_cycles = 0;
+ }
+ } else {
+ timeout_base_divisor = 0;
+ timeout_cycles = 0;
+ }
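+
+ /*
+ * Illustrative result (assuming a 100 MHz parent clock, example
+ * numbers only): with aspeed,hw-timeout-ms = 5, base divisor 0 gives
+ * a ~163 us timeout tick, so 31 timeout cycles (just over 5 ms) are
+ * programmed.
+ */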
+
+ clk_reg_val |= FIELD_PREP(ASPEED_I2CD_TIME_TIMEOUT_BASE_DIVISOR_MASK,
+ timeout_base_divisor);
+
writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
- writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);
+ writel(timeout_cycles, bus->base + ASPEED_I2C_AC_TIMING_REG2);
return 0;
}
@@ -908,10 +1449,18 @@ static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
/* Disable everything. */
writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
+ device_property_read_u32(&pdev->dev, "aspeed,hw-timeout-ms",
+ &bus->hw_timeout_ms);
+ if (bus->hw_timeout_ms)
+ fun_ctrl_reg |= ASPEED_I2CD_BUS_AUTO_RECOVERY_EN;
+
ret = aspeed_i2c_init_clk(bus);
if (ret < 0)
return ret;
+ fun_ctrl_reg |= FIELD_PREP(ASPEED_I2CD_BUFFER_PAGE_SEL_MASK,
+ bus->buf_page);
+
if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
bus->multi_master = true;
else
@@ -922,6 +1471,9 @@ static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
bus->base + ASPEED_I2C_FUN_CTRL_REG);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ if (of_property_read_bool(pdev->dev.of_node, "general-call"))
+ bus->general_call = true;
+
/* If slave has already been registered, re-enable it. */
if (bus->slave)
__aspeed_i2c_reg_slave(bus, bus->slave->addr);
@@ -952,6 +1504,96 @@ static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
return ret;
}
+static void aspeed_i2c_set_xfer_mode(struct aspeed_i2c_bus *bus)
+{
+ struct platform_device *pdev = to_platform_device(bus->dev);
+ bool sram_enabled = true;
+ int ret;
+
+ /*
+ * Enable I2C SRAM in case of AST2500.
+ * SRAM is enabled by default in AST2400 and AST2600.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "aspeed,ast2500-i2c-bus")) {
+ struct regmap *gr_regmap = syscon_regmap_lookup_by_compatible("aspeed,ast2500-i2c-gr");
+
+ if (IS_ERR(gr_regmap))
+ ret = PTR_ERR(gr_regmap);
+ else
+ ret = regmap_update_bits(gr_regmap,
+ ASPEED_I2CG_GLOBAL_CTRL_REG,
+ ASPEED_I2CG_SRAM_BUFFER_EN,
+ ASPEED_I2CG_SRAM_BUFFER_EN);
+
+ if (ret)
+ sram_enabled = false;
+ }
+
+ /*
+ * Only AST2500 and AST2600 support DMA mode, under some limitations:
+ * I2C shares the DMA H/W with the UHCI host controller and the MCTP
+ * controller. Since those controllers operate in DMA mode only, I2C
+ * has to use buffer mode or byte mode instead if one of those
+ * controllers is enabled. Note also that if SD/eMMC or Port80 snoop
+ * uses DMA mode instead of PIO or FIFO respectively, I2C can't use
+ * DMA mode either.
+ */
+ if (sram_enabled && !IS_ENABLED(CONFIG_USB_UHCI_ASPEED) &&
+ !of_device_is_compatible(pdev->dev.of_node,
+ "aspeed,ast2400-i2c-bus")) {
+ u32 dma_len_max = ASPEED_I2CD_DMA_LEN_MASK >>
+ ASPEED_I2CD_DMA_LEN_SHIFT;
+
+ ret = device_property_read_u32(&pdev->dev,
+ "aspeed,dma-buf-size",
+ &bus->buf_size);
+ if (!ret && bus->buf_size > dma_len_max)
+ bus->buf_size = dma_len_max;
+ }
+
+ if (bus->buf_size) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_warn(&pdev->dev, "No suitable DMA available\n");
+ } else {
+ bus->dma_pool = dma_pool_create("i2c-aspeed",
+ &pdev->dev,
+ bus->buf_size,
+ ASPEED_I2CD_DMA_ALIGN,
+ 0);
+ if (bus->dma_pool)
+ bus->dma_buf = dma_pool_alloc(bus->dma_pool,
+ GFP_KERNEL,
+ &bus->dma_handle);
+
+ if (!bus->dma_buf) {
+ dev_warn(&pdev->dev,
+ "Cannot allocate DMA buffer\n");
+ dma_pool_destroy(bus->dma_pool);
+ }
+ }
+ }
+
+ if (!bus->dma_buf && sram_enabled) {
+ struct resource *res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 1);
+
+ if (res && resource_size(res) >= 2)
+ bus->buf_base = devm_ioremap_resource(&pdev->dev, res);
+
+ if (!IS_ERR_OR_NULL(bus->buf_base)) {
+ bus->buf_size = resource_size(res);
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "aspeed,ast2400-i2c-bus")) {
+ bus->buf_page = ((res->start >> 8) &
+ GENMASK(3, 0)) - 8;
+ bus->buf_offset = (res->start >> 2) &
+ ASPEED_I2CD_BUF_OFFSET_MASK;
+ }
+ }
+ }
+}
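+
+/*
+ * Resulting transfer mode, as selected above: DMA mode when a usable
+ * aspeed,dma-buf-size was given and a DMA buffer could be allocated,
+ * otherwise buffer mode when the I2C SRAM region is available, otherwise
+ * plain byte mode.
+ */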
+
static const struct of_device_id aspeed_i2c_bus_of_table[] = {
{
.compatible = "aspeed,ast2400-i2c-bus",
@@ -974,18 +1616,26 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
const struct of_device_id *match;
struct aspeed_i2c_bus *bus;
struct clk *parent_clk;
- struct resource *res;
int irq, ret;
bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
if (!bus)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bus->base = devm_ioremap_resource(&pdev->dev, res);
+ bus->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bus->base))
return PTR_ERR(bus->base);
+ bus->dev = &pdev->dev;
+
+ /* Disable bus and clean up any left over interrupt state. */
+ writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
+ writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
+ writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
+
+ /* Clear slave addresses. */
+ writel(0, bus->base + ASPEED_I2C_DEV_ADDR_REG);
+
parent_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(parent_clk))
return PTR_ERR(parent_clk);
@@ -1016,46 +1666,47 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
match->data;
+ aspeed_i2c_set_xfer_mode(bus);
+
/* Initialize the I2C adapter */
spin_lock_init(&bus->lock);
init_completion(&bus->cmd_complete);
bus->adap.owner = THIS_MODULE;
- bus->adap.retries = 0;
bus->adap.algo = &aspeed_i2c_algo;
bus->adap.dev.parent = &pdev->dev;
bus->adap.dev.of_node = pdev->dev.of_node;
strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
i2c_set_adapdata(&bus->adap, bus);
- bus->dev = &pdev->dev;
-
- /* Clean up any left over interrupt state. */
- writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
- writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);
- /*
- * bus.lock does not need to be held because the interrupt handler has
- * not been enabled yet.
- */
- ret = aspeed_i2c_init(bus, pdev);
- if (ret < 0)
- return ret;
-
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
0, dev_name(&pdev->dev), bus);
if (ret < 0)
- return ret;
+ goto out_free_dma_buf;
+
+ ret = aspeed_i2c_init(bus, pdev);
+ if (ret < 0)
+ goto out_free_dma_buf;
ret = i2c_add_adapter(&bus->adap);
if (ret < 0)
- return ret;
+ goto out_free_dma_buf;
platform_set_drvdata(pdev, bus);
- dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
- bus->adap.nr, irq);
+ dev_info(bus->dev, "i2c bus %d registered (%s mode), irq %d\n",
+ bus->adap.nr, bus->dma_buf ? "dma" :
+ bus->buf_base ? "buffer" : "byte",
+ irq);
return 0;
+
+out_free_dma_buf:
+ if (bus->dma_buf)
+ dma_pool_free(bus->dma_pool, bus->dma_buf, bus->dma_handle);
+ dma_pool_destroy(bus->dma_pool);
+
+ return ret;
}
static int aspeed_i2c_remove_bus(struct platform_device *pdev)
@@ -1073,6 +1724,10 @@ static int aspeed_i2c_remove_bus(struct platform_device *pdev)
reset_control_assert(bus->rst);
+ if (bus->dma_buf)
+ dma_pool_free(bus->dma_pool, bus->dma_buf, bus->dma_handle);
+ dma_pool_destroy(bus->dma_pool);
+
i2c_del_adapter(&bus->adap);
return 0;
@@ -1088,6 +1743,11 @@ static struct platform_driver aspeed_i2c_bus_driver = {
};
module_platform_driver(aspeed_i2c_bus_driver);
+module_param_named(dump_debug, dump_debug, bool, 0644);
+MODULE_PARM_DESC(dump_debug, "debug flag for dump printing");
+module_param_named(dump_debug_bus_id, dump_debug_bus_id, int, 0644);
+MODULE_PARM_DESC(dump_debug_bus_id, "bus id for dump debug printing");
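+
+/*
+ * Illustrative usage (assuming the module is built as i2c_aspeed and the
+ * standard module_param sysfs layout):
+ *   echo 1 > /sys/module/i2c_aspeed/parameters/dump_debug
+ *   echo 5 > /sys/module/i2c_aspeed/parameters/dump_debug_bus_id
+ * enables hex dumps of master transfers on bus 5.
+ */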
+
MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index cfbef70e8ba7..dfc77ef4819f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1428,8 +1428,30 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr)
}
EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
+static void i2c_adapter_hold(struct i2c_adapter *adapter, unsigned long timeout)
+{
+ mutex_lock(&adapter->hold_lock);
+ schedule_delayed_work(&adapter->unhold_work, timeout);
+}
+
+static void i2c_adapter_unhold(struct i2c_adapter *adapter)
+{
+ cancel_delayed_work_sync(&adapter->unhold_work);
+ mutex_unlock(&adapter->hold_lock);
+}
+
+static void i2c_adapter_unhold_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct i2c_adapter *adapter = container_of(dwork, struct i2c_adapter,
+ unhold_work);
+
+ mutex_unlock(&adapter->hold_lock);
+}
+
static int i2c_register_adapter(struct i2c_adapter *adap)
{
+ u32 bus_timeout_ms = 0;
int res = -EINVAL;
/* Can't register until after driver model init */
@@ -1453,12 +1475,21 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->locked_flags = 0;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
+ mutex_init(&adap->hold_lock);
mutex_init(&adap->userspace_clients_lock);
INIT_LIST_HEAD(&adap->userspace_clients);
+ INIT_DELAYED_WORK(&adap->unhold_work, i2c_adapter_unhold_work);
/* Set default timeout to 1 second if not already set */
- if (adap->timeout == 0)
- adap->timeout = HZ;
+ if (adap->timeout == 0) {
+ device_property_read_u32(&adap->dev, "bus-timeout-ms",
+ &bus_timeout_ms);
+ adap->timeout = bus_timeout_ms ?
+ msecs_to_jiffies(bus_timeout_ms) : HZ;
+ }
+
+ /* Set the retries count if the property is present */
+ device_property_read_u32(&adap->dev, "#retries", &adap->retries);
/* register soft irqs for Host Notify */
res = i2c_setup_host_notify_irq_domain(adap);
@@ -1732,6 +1763,8 @@ void i2c_del_adapter(struct i2c_adapter *adap)
idr_remove(&i2c_adapter_idr, adap->nr);
mutex_unlock(&core_lock);
+ i2c_adapter_unhold(adap);
+
/* Clear the device structure in case this adapter is ever going to be
added again */
memset(&adap->dev, 0, sizeof(adap->dev));
@@ -2076,7 +2109,9 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
+ enum i2c_hold_msg_type hold_msg = I2C_HOLD_MSG_NONE;
unsigned long orig_jiffies;
+ unsigned long timeout;
int ret, try;
if (WARN_ON(!msgs || num < 1))
@@ -2089,6 +2124,25 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (adap->quirks && i2c_check_for_quirks(adap, msgs, num))
return -EOPNOTSUPP;
+ /* Do not deliver a mux hold msg to root bus adapter */
+ if (!i2c_parent_is_i2c_adapter(adap)) {
+ hold_msg = i2c_check_hold_msg(msgs[num - 1].flags,
+ msgs[num - 1].len,
+ (u16 *)msgs[num - 1].buf);
+ if (hold_msg == I2C_HOLD_MSG_SET) {
+ timeout = msecs_to_jiffies(*(u16 *)msgs[num - 1].buf);
+ i2c_adapter_hold(adap, timeout);
+
+ if (--num == 0)
+ return 0;
+ } else if (hold_msg == I2C_HOLD_MSG_RESET) {
+ i2c_adapter_unhold(adap);
+ return 0;
+ } else if (hold_msg == I2C_HOLD_MSG_NONE) {
+ mutex_lock(&adap->hold_lock);
+ }
+ }
+
/*
* i2c_trace_msg_key gets enabled when tracepoint i2c_transfer gets
* enabled. This is an efficient way of keeping the for-loop from
@@ -2125,6 +2179,13 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
trace_i2c_result(adap, num, ret);
}
+ if (!i2c_parent_is_i2c_adapter(adap)) {
+ if (hold_msg == I2C_HOLD_MSG_SET && ret < 0)
+ i2c_adapter_unhold(adap);
+ else if (hold_msg == I2C_HOLD_MSG_NONE)
+ mutex_unlock(&adap->hold_lock);
+ }
+
return ret;
}
EXPORT_SYMBOL(__i2c_transfer);
@@ -2143,6 +2204,7 @@ EXPORT_SYMBOL(__i2c_transfer);
*/
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
+ bool do_bus_lock = true;
int ret;
if (!adap->algo->master_xfer) {
@@ -2166,12 +2228,25 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
* one (discarding status on the second message) or errno
* (discarding status on the first one).
*/
- ret = __i2c_lock_bus_helper(adap);
- if (ret)
- return ret;
+ /*
+ * Do not lock the bus when delivering an unhold msg to a mux
+ * adapter. This applies only to a single-message unhold transfer.
+ */
+ if (num == 1 && i2c_parent_is_i2c_adapter(adap) &&
+ i2c_check_hold_msg(msgs[0].flags, msgs[0].len,
+ (u16 *)msgs[0].buf) ==
+ I2C_HOLD_MSG_RESET)
+ do_bus_lock = false;
+
+ if (do_bus_lock) {
+ ret = __i2c_lock_bus_helper(adap);
+ if (ret)
+ return ret;
+ }
ret = __i2c_transfer(adap, msgs, num);
- i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
+ if (do_bus_lock)
+ i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
return ret;
}
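For illustration, a minimal sketch of how a client driver might use the hold/unhold messages handled above. The flag name I2C_M_HOLD and the zero-timeout-means-unhold convention are assumptions inferred from the i2c_check_hold_msg() call sites in this hunk, not definitions shown here.

/*
 * Illustrative sketch only (assumed flag name I2C_M_HOLD, assumed
 * zero timeout == unhold): pin the currently selected mux channel for up
 * to 50 ms, run other transfers, then release it explicitly.
 */
static int example_hold_mux_channel(struct i2c_client *client)
{
	u16 hold_ms = 50;	/* auto-unhold after 50 ms via delayed work */
	u16 release = 0;	/* zero timeout is treated as the unhold request */
	struct i2c_msg hold = {
		.addr = client->addr,
		.flags = I2C_M_HOLD,
		.len = sizeof(hold_ms),
		.buf = (u8 *)&hold_ms,
	};
	struct i2c_msg unhold = hold;
	int ret;

	unhold.buf = (u8 *)&release;

	/* A single hold message returns before touching the wires. */
	ret = i2c_transfer(client->adapter, &hold, 1);
	if (ret < 0)
		return ret;

	/* ... issue the transfers that must stay on this mux channel ... */

	return i2c_transfer(client->adapter, &unhold, 1);
}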
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index e5b2d1465e7e..e4ec158d38f1 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -535,15 +535,29 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
unsigned short flags, char read_write,
u8 command, int protocol, union i2c_smbus_data *data)
{
+ bool do_bus_lock = true;
s32 res;
- res = __i2c_lock_bus_helper(adapter);
- if (res)
- return res;
+ /*
+ * Do not lock the bus when delivering an unhold msg to a mux adapter.
+ * This applies only to a single-message unhold transfer.
+ */
+ if (i2c_parent_is_i2c_adapter(adapter) &&
+ i2c_check_hold_msg(flags,
+ protocol == I2C_SMBUS_WORD_DATA ? 2 : 0,
+ &data->word) == I2C_HOLD_MSG_RESET)
+ do_bus_lock = false;
+
+ if (do_bus_lock) {
+ res = __i2c_lock_bus_helper(adapter);
+ if (res)
+ return res;
+ }
res = __i2c_smbus_xfer(adapter, addr, flags, read_write,
command, protocol, data);
- i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
+ if (do_bus_lock)
+ i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
return res;
}
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 774507b54b57..abdfd7513df2 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -36,21 +36,66 @@ struct i2c_mux_priv {
u32 chan_id;
};
+static void i2c_mux_hold(struct i2c_mux_core *muxc, u32 chan_id,
+ unsigned long timeout)
+{
+ mutex_lock(&muxc->hold_lock);
+ muxc->holder_chan_id = chan_id;
+ schedule_delayed_work(&muxc->unhold_work, timeout);
+}
+
+static void i2c_mux_unhold(struct i2c_mux_core *muxc)
+{
+ cancel_delayed_work_sync(&muxc->unhold_work);
+ mutex_unlock(&muxc->hold_lock);
+}
+
+static void i2c_mux_unhold_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct i2c_mux_core *muxc = container_of(dwork, struct i2c_mux_core,
+ unhold_work);
+
+ if (muxc->deselect)
+ muxc->deselect(muxc, muxc->holder_chan_id);
+
+ mutex_unlock(&muxc->hold_lock);
+}
+
static int __i2c_mux_master_xfer(struct i2c_adapter *adap,
struct i2c_msg msgs[], int num)
{
struct i2c_mux_priv *priv = adap->algo_data;
struct i2c_mux_core *muxc = priv->muxc;
struct i2c_adapter *parent = muxc->parent;
+ enum i2c_hold_msg_type hold_msg;
+ unsigned long timeout;
int ret;
/* Switch to the right mux port and perform the transfer. */
+ hold_msg = i2c_check_hold_msg(msgs[num - 1].flags,
+ msgs[num - 1].len,
+ (u16 *)msgs[num - 1].buf);
+ if (hold_msg == I2C_HOLD_MSG_SET) {
+ timeout = msecs_to_jiffies(*(u16 *)msgs[num - 1].buf);
+ i2c_mux_hold(muxc, priv->chan_id, timeout);
+ } else if (hold_msg == I2C_HOLD_MSG_NONE) {
+ mutex_lock(&muxc->hold_lock);
+ }
ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
ret = __i2c_transfer(parent, msgs, num);
- if (muxc->deselect)
- muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg != I2C_HOLD_MSG_SET) {
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg == I2C_HOLD_MSG_RESET)
+ i2c_mux_unhold(muxc);
+ else
+ mutex_unlock(&muxc->hold_lock);
+ } else if (hold_msg == I2C_HOLD_MSG_SET && ret < 0) {
+ i2c_mux_unhold(muxc);
+ }
return ret;
}
@@ -61,15 +106,32 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
struct i2c_mux_priv *priv = adap->algo_data;
struct i2c_mux_core *muxc = priv->muxc;
struct i2c_adapter *parent = muxc->parent;
+ enum i2c_hold_msg_type hold_msg;
+ unsigned long timeout;
int ret;
/* Switch to the right mux port and perform the transfer. */
+ hold_msg = i2c_check_hold_msg(msgs[num - 1].flags,
+ msgs[num - 1].len,
+ (u16 *)msgs[num - 1].buf);
+ if (hold_msg == I2C_HOLD_MSG_SET) {
+ timeout = msecs_to_jiffies(*(u16 *)msgs[num - 1].buf);
+ i2c_mux_hold(muxc, priv->chan_id, timeout);
+ } else if (hold_msg == I2C_HOLD_MSG_NONE) {
+ mutex_lock(&muxc->hold_lock);
+ }
ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
ret = i2c_transfer(parent, msgs, num);
- if (muxc->deselect)
- muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg != I2C_HOLD_MSG_SET) {
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg == I2C_HOLD_MSG_RESET)
+ i2c_mux_unhold(muxc);
+ else
+ mutex_unlock(&muxc->hold_lock);
+ }
return ret;
}
@@ -82,16 +144,33 @@ static int __i2c_mux_smbus_xfer(struct i2c_adapter *adap,
struct i2c_mux_priv *priv = adap->algo_data;
struct i2c_mux_core *muxc = priv->muxc;
struct i2c_adapter *parent = muxc->parent;
+ enum i2c_hold_msg_type hold_msg;
+ unsigned long timeout;
int ret;
/* Select the right mux port and perform the transfer. */
+ hold_msg = i2c_check_hold_msg(flags,
+ size == I2C_SMBUS_WORD_DATA ? 2 : 0,
+ &data->word);
+ if (hold_msg == I2C_HOLD_MSG_SET) {
+ timeout = msecs_to_jiffies(data->word);
+ i2c_mux_hold(muxc, priv->chan_id, timeout);
+ } else if (hold_msg == I2C_HOLD_MSG_NONE) {
+ mutex_lock(&muxc->hold_lock);
+ }
ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
ret = __i2c_smbus_xfer(parent, addr, flags,
read_write, command, size, data);
- if (muxc->deselect)
- muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg != I2C_HOLD_MSG_SET) {
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg == I2C_HOLD_MSG_RESET)
+ i2c_mux_unhold(muxc);
+ else
+ mutex_unlock(&muxc->hold_lock);
+ }
return ret;
}
@@ -104,16 +183,33 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
struct i2c_mux_priv *priv = adap->algo_data;
struct i2c_mux_core *muxc = priv->muxc;
struct i2c_adapter *parent = muxc->parent;
+ enum i2c_hold_msg_type hold_msg;
+ unsigned long timeout;
int ret;
/* Select the right mux port and perform the transfer. */
+ hold_msg = i2c_check_hold_msg(flags,
+ size == I2C_SMBUS_WORD_DATA ? 2 : 0,
+ &data->word);
+ if (hold_msg == I2C_HOLD_MSG_SET) {
+ timeout = msecs_to_jiffies(data->word);
+ i2c_mux_hold(muxc, priv->chan_id, timeout);
+ } else if (hold_msg == I2C_HOLD_MSG_NONE) {
+ mutex_lock(&muxc->hold_lock);
+ }
ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
ret = i2c_smbus_xfer(parent, addr, flags,
read_write, command, size, data);
- if (muxc->deselect)
- muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg != I2C_HOLD_MSG_SET) {
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
+ if (hold_msg == I2C_HOLD_MSG_RESET)
+ i2c_mux_unhold(muxc);
+ else
+ mutex_unlock(&muxc->hold_lock);
+ }
return ret;
}
@@ -263,6 +359,9 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
muxc->deselect = deselect;
muxc->max_adapters = max_adapters;
+ mutex_init(&muxc->hold_lock);
+ INIT_DELAYED_WORK(&muxc->unhold_work, i2c_mux_unhold_work);
+
return muxc;
}
EXPORT_SYMBOL_GPL(i2c_mux_alloc);
@@ -441,6 +540,8 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
{
char symlink_name[20];
+ i2c_mux_unhold(muxc);
+
while (muxc->num_adapters) {
struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
struct i2c_mux_priv *priv = adap->algo_data;
diff --git a/drivers/i2c/i2c-slave-mqueue.c b/drivers/i2c/i2c-slave-mqueue.c
new file mode 100644
index 000000000000..1d4db584b393
--- /dev/null
+++ b/drivers/i2c/i2c-slave-mqueue.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 - 2018, Intel Corporation.
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+
+#define MQ_MSGBUF_SIZE CONFIG_I2C_SLAVE_MQUEUE_MESSAGE_SIZE
+#define MQ_QUEUE_SIZE CONFIG_I2C_SLAVE_MQUEUE_QUEUE_SIZE
+#define MQ_QUEUE_NEXT(x) (((x) + 1) & (MQ_QUEUE_SIZE - 1))
+
+struct mq_msg {
+ int len;
+ u8 *buf;
+};
+
+struct mq_queue {
+ struct bin_attribute bin;
+ struct kernfs_node *kn;
+ struct i2c_client *client;
+
+ spinlock_t lock; /* spinlock for queue index handling */
+ int in;
+ int out;
+
+ struct mq_msg *curr;
+ int truncated; /* drop current if truncated */
+ struct mq_msg *queue;
+};
+
+static bool dump_debug __read_mostly;
+static int dump_debug_bus_id __read_mostly;
+
+#define I2C_HEX_DUMP(client, buf, len) \
+ do { \
+ if (dump_debug && \
+ (client)->adapter->nr == dump_debug_bus_id) { \
+ char dump_info[100] = {0,}; \
+ snprintf(dump_info, sizeof(dump_info), \
+ "bus_id:%d: ", (client)->adapter->nr); \
+ print_hex_dump(KERN_ERR, dump_info, DUMP_PREFIX_NONE, \
+ 16, 1, buf, len, true); \
+ } \
+ } while (0)
+
+static int i2c_slave_mqueue_callback(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct mq_queue *mq = i2c_get_clientdata(client);
+ struct mq_msg *msg = mq->curr;
+ int ret = 0;
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ case I2C_SLAVE_GCALL_REQUESTED:
+ mq->truncated = 0;
+
+ msg->len = 1;
+ msg->buf[0] = event == I2C_SLAVE_GCALL_REQUESTED ?
+ 0 : client->addr << 1;
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (msg->len < MQ_MSGBUF_SIZE) {
+ msg->buf[msg->len++] = *val;
+ } else {
+ dev_err(&client->dev, "message is truncated!\n");
+ mq->truncated = 1;
+ ret = -EINVAL;
+ }
+ break;
+
+ case I2C_SLAVE_STOP:
+ if (unlikely(mq->truncated || msg->len < 2))
+ break;
+
+ spin_lock(&mq->lock);
+ mq->in = MQ_QUEUE_NEXT(mq->in);
+ mq->curr = &mq->queue[mq->in];
+ mq->curr->len = 0;
+
+ /* Flush the oldest message */
+ if (mq->out == mq->in)
+ mq->out = MQ_QUEUE_NEXT(mq->out);
+ spin_unlock(&mq->lock);
+
+ kernfs_notify(mq->kn);
+ break;
+
+ default:
+ *val = 0xFF;
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t i2c_slave_mqueue_bin_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct mq_queue *mq;
+ struct mq_msg *msg;
+ unsigned long flags;
+ bool more = false;
+ ssize_t ret = 0;
+
+ mq = dev_get_drvdata(container_of(kobj, struct device, kobj));
+
+ spin_lock_irqsave(&mq->lock, flags);
+ if (mq->out != mq->in) {
+ msg = &mq->queue[mq->out];
+
+ if (msg->len <= count) {
+ ret = msg->len;
+ memcpy(buf, msg->buf, ret);
+ I2C_HEX_DUMP(mq->client, buf, ret);
+ } else {
+ ret = -EOVERFLOW; /* Drop this HUGE one. */
+ }
+
+ mq->out = MQ_QUEUE_NEXT(mq->out);
+ if (mq->out != mq->in)
+ more = true;
+ }
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+ if (more)
+ kernfs_notify(mq->kn);
+
+ return ret;
+}
+
+static int i2c_slave_mqueue_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct mq_queue *mq;
+ int ret, i;
+ void *buf;
+
+ mq = devm_kzalloc(dev, sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(!is_power_of_2(MQ_QUEUE_SIZE));
+
+ mq->client = client;
+
+ buf = devm_kmalloc_array(dev, MQ_QUEUE_SIZE, MQ_MSGBUF_SIZE,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mq->queue = devm_kzalloc(dev, sizeof(*mq->queue) * MQ_QUEUE_SIZE,
+ GFP_KERNEL);
+ if (!mq->queue)
+ return -ENOMEM;
+
+ for (i = 0; i < MQ_QUEUE_SIZE; i++)
+ mq->queue[i].buf = buf + i * MQ_MSGBUF_SIZE;
+
+ i2c_set_clientdata(client, mq);
+
+ spin_lock_init(&mq->lock);
+ mq->curr = &mq->queue[0];
+
+ sysfs_bin_attr_init(&mq->bin);
+ mq->bin.attr.name = "slave-mqueue";
+ mq->bin.attr.mode = 0400;
+ mq->bin.read = i2c_slave_mqueue_bin_read;
+ mq->bin.size = MQ_MSGBUF_SIZE * MQ_QUEUE_SIZE;
+
+ ret = sysfs_create_bin_file(&dev->kobj, &mq->bin);
+ if (ret)
+ return ret;
+
+ mq->kn = kernfs_find_and_get(dev->kobj.sd, mq->bin.attr.name);
+ if (!mq->kn) {
+ sysfs_remove_bin_file(&dev->kobj, &mq->bin);
+ return -EFAULT;
+ }
+
+ ret = i2c_slave_register(client, i2c_slave_mqueue_callback);
+ if (ret) {
+ kernfs_put(mq->kn);
+ sysfs_remove_bin_file(&dev->kobj, &mq->bin);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i2c_slave_mqueue_remove(struct i2c_client *client)
+{
+ struct mq_queue *mq = i2c_get_clientdata(client);
+
+ i2c_slave_unregister(client);
+
+ kernfs_put(mq->kn);
+ sysfs_remove_bin_file(&client->dev.kobj, &mq->bin);
+
+ return 0;
+}
+
+static const struct i2c_device_id i2c_slave_mqueue_id[] = {
+ { "slave-mqueue", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, i2c_slave_mqueue_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id i2c_slave_mqueue_of_match[] = {
+ { .compatible = "slave-mqueue", .data = (void *)0 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, i2c_slave_mqueue_of_match);
+#endif
+
+static struct i2c_driver i2c_slave_mqueue_driver = {
+ .driver = {
+ .name = "i2c-slave-mqueue",
+ .of_match_table = of_match_ptr(i2c_slave_mqueue_of_match),
+ },
+ .probe = i2c_slave_mqueue_probe,
+ .remove = i2c_slave_mqueue_remove,
+ .id_table = i2c_slave_mqueue_id,
+};
+module_i2c_driver(i2c_slave_mqueue_driver);
+
+module_param_named(dump_debug, dump_debug, bool, 0644);
+MODULE_PARM_DESC(dump_debug, "debug flag for dump printing");
+module_param_named(dump_debug_bus_id, dump_debug_bus_id, int, 0644);
+MODULE_PARM_DESC(dump_debug_bus_id, "bus id for dump debug printing");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_DESCRIPTION("I2C slave mode for receiving and queuing messages");
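A user-space consumer reads one queued message per read() from the "slave-mqueue" binary attribute created above; kernfs_notify() makes the file pollable. A minimal sketch follows — the sysfs path (standard <bus>-<addr> i2c device naming) and the use of POLLPRI are assumptions, not part of this patch.

/* User-space sketch: drain queued slave messages (device path assumed). */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Example: slave at 0x10 on bus 3 -> "3-1010" under /sys/bus/i2c. */
	const char *path = "/sys/bus/i2c/devices/3-1010/slave-mqueue";
	unsigned char msg[256];	/* >= CONFIG_I2C_SLAVE_MQUEUE_MESSAGE_SIZE */
	struct pollfd pfd;
	ssize_t len;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI;

	while (poll(&pfd, 1, -1) > 0) {
		/* Each read returns one message; 0 means the queue is empty. */
		while ((len = pread(pfd.fd, msg, sizeof(msg), 0)) > 0)
			printf("%zd byte message, first byte 0x%02x\n",
			       len, msg[0]);
	}
	close(pfd.fd);
	return 0;
}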
diff --git a/drivers/i3c/Kconfig b/drivers/i3c/Kconfig
index 30a441506f61..93e6c1e68a2b 100644
--- a/drivers/i3c/Kconfig
+++ b/drivers/i3c/Kconfig
@@ -20,5 +20,34 @@ menuconfig I3C
will be called i3c.
if I3C
+
+source "drivers/i3c/mctp/Kconfig"
+
+config I3CDEV
+ tristate "I3C device interface"
+ depends on I3C
+ help
+ Say Y here to use i3c-* device files, usually found in the /dev
+ directory on your system. They make it possible to have user-space
+ programs use the I3C devices.
+
+ This support is also available as a module. If so, the module
+ will be called i3cdev.
+
+ Note that this application programming interface is EXPERIMENTAL
+ and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
+
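To make the help text above concrete, a hedged user-space sketch of a register read through one of these nodes; the uapi header path, the /dev node name and the struct i3c_ioc_priv_xfer field layout are inferred from the i3cdev code added later in this series, so treat them as assumptions.

/* User-space sketch (assumed header path and device node name). */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i3c/i3cdev.h>

static int i3c_read_reg(const char *node, uint8_t reg, uint8_t *val)
{
	struct i3c_ioc_priv_xfer xfers[2] = {
		{ .data = (uintptr_t)&reg, .len = 1, .rnw = 0 },	/* write register address */
		{ .data = (uintptr_t)val,  .len = 1, .rnw = 1 },	/* read one byte back */
	};
	int fd, ret;

	fd = open(node, O_RDWR);	/* e.g. /dev/i3c-0-6072303904d2 */
	if (fd < 0)
		return -1;

	ret = ioctl(fd, I3C_IOC_PRIV_XFER(2), xfers);
	close(fd);
	return ret;
}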
+config I3C_HUB
+ tristate "I3C HUB support"
+ depends on I3C
+ select REGMAP_I3C
+ help
+ This enables support for the I3C HUB. Say Y here to use the I3C HUB
+ driver to configure I3C HUB devices.
+
+ The I3C HUB driver is loaded automatically when an I3C device whose
+ DCR equals 0xC2 (HUB device) is detected on the bus.
+
source "drivers/i3c/master/Kconfig"
+
endif # I3C
diff --git a/drivers/i3c/Makefile b/drivers/i3c/Makefile
index 11982efbc6d9..4a61329c4f7f 100644
--- a/drivers/i3c/Makefile
+++ b/drivers/i3c/Makefile
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
i3c-y := device.o master.o
obj-$(CONFIG_I3C) += i3c.o
+obj-$(CONFIG_I3CDEV) += i3cdev.o
obj-$(CONFIG_I3C) += master/
+obj-$(CONFIG_I3C) += mctp/
+obj-$(CONFIG_I3C_HUB) += i3c-hub.o
+
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index e92d3e9a52bd..be6669cf0846 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -51,6 +51,29 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev,
EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
/**
+ * i3c_device_generate_ibi() - request In-Band Interrupt
+ *
+ * @dev: target device
+ * @data: IBI payload
+ * @len: payload length in bytes
+ *
+ * Request In-Band Interrupt with or without data payload.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_generate_ibi(struct i3c_device *dev, const u8 *data, int len)
+{
+ int ret;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ ret = i3c_dev_generate_ibi_locked(dev->desc, data, len);
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_generate_ibi);
+
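A minimal, hypothetical use of the new helper from a target-side function driver; the payload value is illustrative only.

/* Illustrative only: raise an IBI carrying a single, device-specific byte. */
static int example_notify_host(struct i3c_device *i3cdev)
{
	const u8 payload[] = { 0xAE };	/* example mandatory data byte */

	return i3c_device_generate_ibi(i3cdev, payload, sizeof(payload));
}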
+/**
* i3c_device_get_info() - get I3C device information
*
* @dev: device we want information on
@@ -283,3 +306,27 @@ void i3c_driver_unregister(struct i3c_driver *drv)
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(i3c_driver_unregister);
+
+/**
+ * i3c_device_getstatus_ccc() - receive device status
+ *
+ * @dev: I3C device to get the status for
+ * @info: I3C device info to fill the status in
+ *
+ * Receive I3C device status from I3C master device via corresponding CCC
+ * command
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_getstatus_ccc(struct i3c_device *dev, struct i3c_device_info *info)
+{
+ int ret = -EINVAL;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc)
+ ret = i3c_dev_getstatus_locked(dev->desc, info);
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_getstatus_ccc);
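Likewise, a short sketch of fetching the status word with the new GETSTATUS helper; i3c_device_get_info() is called first only because the CCC destination is taken from info->dyn_addr.

/* Illustrative only: read the device status word via GETSTATUS. */
static int example_read_status(struct i3c_device *i3cdev)
{
	struct i3c_device_info info;
	int ret;

	i3c_device_get_info(i3cdev, &info);	/* fills dyn_addr for the CCC */
	ret = i3c_device_getstatus_ccc(i3cdev, &info);
	if (ret)
		return ret;

	dev_info(&i3cdev->dev, "GETSTATUS: 0x%04x\n", info.status);
	return 0;
}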
diff --git a/drivers/i3c/i3c-hub.c b/drivers/i3c/i3c-hub.c
new file mode 100644
index 000000000000..59576b244531
--- /dev/null
+++ b/drivers/i3c/i3c-hub.c
@@ -0,0 +1,699 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation.*/
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#include <linux/i3c/device.h>
+#include <linux/i3c/master.h>
+
+#define I3C_HUB_TP_MAX_COUNT 0x08
+
+/* I3C HUB REGISTERS */
+
+/*
+ * This driver uses the Controller - Target naming convention. All abbreviations
+ * follow it, e.g. CP - Controller Port, TP - Target Port.
+ */
+
+/* Device Information Registers */
+#define I3C_HUB_DEV_INFO_0 0x00
+#define I3C_HUB_DEV_INFO_1 0x01
+#define I3C_HUB_PID_5 0x02
+#define I3C_HUB_PID_4 0x03
+#define I3C_HUB_PID_3 0x04
+#define I3C_HUB_PID_2 0x05
+#define I3C_HUB_PID_1 0x06
+#define I3C_HUB_PID_0 0x07
+#define I3C_HUB_BCR 0x08
+#define I3C_HUB_DCR 0x09
+#define I3C_HUB_DEV_CAPAB 0x0A
+#define I3C_HUB_DEV_REV 0x0B
+
+/* Device Configuration Registers */
+#define I3C_HUB_PROTECTION_CODE 0x10
+#define REGISTERS_LOCK_CODE 0x00
+#define REGISTERS_UNLOCK_CODE 0x69
+#define CP1_REGISTERS_UNLOCK_CODE 0x6A
+
+#define I3C_HUB_CP_CONF 0x11
+#define I3C_HUB_TP_ENABLE 0x12
+#define TPn_ENABLE(n) BIT(n)
+
+#define I3C_HUB_DEV_CONF 0x13
+#define I3C_HUB_IO_STRENGTH 0x14
+#define I3C_HUB_NET_OPER_MODE_CONF 0x15
+#define I3C_HUB_LDO_CONF 0x16
+#define CP0_LDO_VOLTAGE_MASK GENMASK(1, 0)
+#define CP0_LDO_VOLTAGE(x) (((x) << 0) & CP0_LDO_VOLTAGE_MASK)
+#define CP1_LDO_VOLTAGE_MASK GENMASK(3, 2)
+#define CP1_LDO_VOLTAGE(x) (((x) << 2) & CP1_LDO_VOLTAGE_MASK)
+#define TP0145_LDO_VOLTAGE_MASK GENMASK(5, 4)
+#define TP0145_LDO_VOLTAGE(x) (((x) << 4) & TP0145_LDO_VOLTAGE_MASK)
+#define TP2367_LDO_VOLTAGE_MASK GENMASK(7, 6)
+#define TP2367_LDO_VOLTAGE(x) (((x) << 6) & TP2367_LDO_VOLTAGE_MASK)
+#define LDO_VOLTAGE_1_0V 0x00
+#define LDO_VOLTAGE_1_1V 0x01
+#define LDO_VOLTAGE_1_2V 0x02
+#define LDO_VOLTAGE_1_8V 0x03
+
+#define I3C_HUB_TP_IO_MODE_CONF 0x17
+#define I3C_HUB_TP_SMBUS_AGNT_EN 0x18
+#define TPn_SMBUS_MODE_EN(n) BIT(n)
+
+#define I3C_HUB_LDO_AND_PULLUP_CONF 0x19
+#define CP0_LDO_EN BIT(0)
+#define CP1_LDO_EN BIT(1)
+/*
+ * The I3C HUB does not provide a way to control the LDO or pull-up per individual port; it can
+ * only be done for the port groups TP0/TP1/TP4/TP5 and TP2/TP3/TP6/TP7.
+ */
+#define TP0145_LDO_EN BIT(2)
+#define TP2367_LDO_EN BIT(3)
+#define TP0145_PULLUP_CONF_MASK GENMASK(7, 6)
+#define TP0145_PULLUP_CONF(x) (((x) << 6) & TP0145_PULLUP_CONF_MASK)
+#define TP2367_PULLUP_CONF_MASK GENMASK(5, 4)
+#define TP2367_PULLUP_CONF(x) (((x) << 4) & TP2367_PULLUP_CONF_MASK)
+#define PULLUP_250R 0x00
+#define PULLUP_500R 0x01
+#define PULLUP_1K 0x02
+#define PULLUP_2K 0x03
+
+#define I3C_HUB_CP_IBI_CONF 0x1A
+#define I3C_HUB_TP_IBI_CONF 0x1B
+#define I3C_HUB_IBI_MDB_CUSTOM 0x1C
+#define I3C_HUB_JEDEC_CONTEXT_ID 0x1D
+#define I3C_HUB_TP_GPIO_MODE_EN 0x1E
+#define TPn_GPIO_MODE_EN(n) BIT(n)
+
+/* Device Status and IBI Registers */
+#define I3C_HUB_DEV_AND_IBI_STS 0x20
+#define I3C_HUB_TP_SMBUS_AGNT_IBI_STS 0x21
+
+/* Controller Port Control/Status Registers */
+#define I3C_HUB_CP_MUX_SET 0x38
+#define I3C_HUB_CP_MUX_STS 0x39
+
+/* Target Ports Control Registers */
+#define I3C_HUB_TP_SMBUS_AGNT_TRANS_START 0x50
+#define I3C_HUB_TP_NET_CON_CONF 0x51
+#define TPn_NET_CON(n) BIT(n)
+
+#define I3C_HUB_TP_PULLUP_EN 0x53
+#define TPn_PULLUP_EN(n) BIT(n)
+
+#define I3C_HUB_TP_SCL_OUT_EN 0x54
+#define I3C_HUB_TP_SDA_OUT_EN 0x55
+#define I3C_HUB_TP_SCL_OUT_LEVEL 0x56
+#define I3C_HUB_TP_SDA_OUT_LEVEL 0x57
+#define I3C_HUB_TP_IN_DETECT_MODE_CONF 0x58
+#define I3C_HUB_TP_SCL_IN_DETECT_IBI_EN 0x59
+#define I3C_HUB_TP_SDA_IN_DETECT_IBI_EN 0x5A
+
+/* Target Ports Status Registers */
+#define I3C_HUB_TP_SCL_IN_LEVEL_STS 0x60
+#define I3C_HUB_TP_SDA_IN_LEVEL_STS 0x61
+#define I3C_HUB_TP_SCL_IN_DETECT_FLG 0x62
+#define I3C_HUB_TP_SDA_IN_DETECT_FLG 0x63
+
+/* SMBus Agent Configuration and Status Registers */
+#define I3C_HUB_TP0_SMBUS_AGNT_STS 0x64
+#define I3C_HUB_TP1_SMBUS_AGNT_STS 0x65
+#define I3C_HUB_TP2_SMBUS_AGNT_STS 0x66
+#define I3C_HUB_TP3_SMBUS_AGNT_STS 0x67
+#define I3C_HUB_TP4_SMBUS_AGNT_STS 0x68
+#define I3C_HUB_TP5_SMBUS_AGNT_STS 0x69
+#define I3C_HUB_TP6_SMBUS_AGNT_STS 0x6A
+#define I3C_HUB_TP7_SMBUS_AGNT_STS 0x6B
+#define I3C_HUB_ONCHIP_TD_AND_SMBUS_AGNT_CONF 0x6C
+
+/* Special Function Registers */
+#define I3C_HUB_LDO_AND_CPSEL_STS 0x79
+#define I3C_HUB_BUS_RESET_SCL_TIMEOUT 0x7A
+#define I3C_HUB_ONCHIP_TD_PROTO_ERR_FLG 0x7B
+#define I3C_HUB_DEV_CMD 0x7C
+#define I3C_HUB_ONCHIP_TD_STS 0x7D
+#define I3C_HUB_ONCHIP_TD_ADDR_CONF 0x7E
+#define I3C_HUB_PAGE_PTR 0x7F
+
+/* LDO DT settings */
+#define I3C_HUB_DT_LDO_DISABLED 0x00
+#define I3C_HUB_DT_LDO_1_0V 0x01
+#define I3C_HUB_DT_LDO_1_1V 0x02
+#define I3C_HUB_DT_LDO_1_2V 0x03
+#define I3C_HUB_DT_LDO_1_8V 0x04
+#define I3C_HUB_DT_LDO_NOT_DEFINED 0xFF
+
+/* Pull-up DT settings */
+#define I3C_HUB_DT_PULLUP_DISABLED 0x00
+#define I3C_HUB_DT_PULLUP_250R 0x01
+#define I3C_HUB_DT_PULLUP_500R 0x02
+#define I3C_HUB_DT_PULLUP_1K 0x03
+#define I3C_HUB_DT_PULLUP_2K 0x04
+#define I3C_HUB_DT_PULLUP_NOT_DEFINED 0xFF
+
+/* TP DT setting */
+#define I3C_HUB_DT_TP_MODE_DISABLED 0x00
+#define I3C_HUB_DT_TP_MODE_I3C 0x01
+#define I3C_HUB_DT_TP_MODE_I3C_PERF 0x02
+#define I3C_HUB_DT_TP_MODE_SMBUS 0x03
+#define I3C_HUB_DT_TP_MODE_GPIO 0x04
+#define I3C_HUB_DT_TP_MODE_NOT_DEFINED 0xFF
+
+/* TP pull-up status */
+#define I3C_HUB_DT_TP_PULLUP_DISABLED 0x00
+#define I3C_HUB_DT_TP_PULLUP_ENABLED 0x01
+#define I3C_HUB_DT_TP_PULLUP_NOT_DEFINED 0xFF
+
+struct tp_setting {
+ u8 mode;
+ u8 pullup_en;
+};
+
+struct dt_settings {
+ u8 cp0_ldo;
+ u8 cp1_ldo;
+ u8 tp0145_ldo;
+ u8 tp2367_ldo;
+ u8 tp0145_pullup;
+ u8 tp2367_pullup;
+ struct tp_setting tp[I3C_HUB_TP_MAX_COUNT];
+};
+
+struct i3c_hub {
+ struct i3c_device *i3cdev;
+ struct regmap *regmap;
+ struct dt_settings settings;
+
+ /* Offset for reading HUB's register. */
+ u8 reg_addr;
+ struct dentry *debug_dir;
+};
+
+struct hub_setting {
+ const char * const name;
+ const u8 value;
+};
+
+static const struct hub_setting ldo_settings[] = {
+ {"disabled", I3C_HUB_DT_LDO_DISABLED},
+ {"1.0V", I3C_HUB_DT_LDO_1_0V},
+ {"1.1V", I3C_HUB_DT_LDO_1_1V},
+ {"1.2V", I3C_HUB_DT_LDO_1_2V},
+ {"1.8V", I3C_HUB_DT_LDO_1_8V},
+};
+
+static const struct hub_setting pullup_settings[] = {
+ {"disabled", I3C_HUB_DT_PULLUP_DISABLED},
+ {"250R", I3C_HUB_DT_PULLUP_250R},
+ {"500R", I3C_HUB_DT_PULLUP_500R},
+ {"1k", I3C_HUB_DT_PULLUP_1K},
+ {"2k", I3C_HUB_DT_PULLUP_2K},
+};
+
+static const struct hub_setting tp_mode_settings[] = {
+ {"disabled", I3C_HUB_DT_TP_MODE_DISABLED},
+ {"i3c", I3C_HUB_DT_TP_MODE_I3C},
+ {"i3c-perf", I3C_HUB_DT_TP_MODE_I3C_PERF},
+ {"smbus", I3C_HUB_DT_TP_MODE_SMBUS},
+ {"gpio", I3C_HUB_DT_TP_MODE_GPIO},
+};
+
+static const struct hub_setting tp_pullup_settings[] = {
+ {"disabled", I3C_HUB_DT_TP_PULLUP_DISABLED},
+ {"enabled", I3C_HUB_DT_TP_PULLUP_ENABLED},
+};
+
+static u8 i3c_hub_ldo_dt_to_reg(u8 dt_value)
+{
+ switch (dt_value) {
+ case I3C_HUB_DT_LDO_1_1V:
+ return LDO_VOLTAGE_1_1V;
+ case I3C_HUB_DT_LDO_1_2V:
+ return LDO_VOLTAGE_1_2V;
+ case I3C_HUB_DT_LDO_1_8V:
+ return LDO_VOLTAGE_1_8V;
+ default:
+ return LDO_VOLTAGE_1_0V;
+ }
+}
+
+static u8 i3c_hub_pullup_dt_to_reg(u8 dt_value)
+{
+ switch (dt_value) {
+ case I3C_HUB_DT_PULLUP_250R:
+ return PULLUP_250R;
+ case I3C_HUB_DT_PULLUP_500R:
+ return PULLUP_500R;
+ case I3C_HUB_DT_PULLUP_1K:
+ return PULLUP_1K;
+ default:
+ return PULLUP_2K;
+ }
+}
+
+static int i3c_hub_of_get_setting(const struct device_node *node, const char *setting_name,
+ const struct hub_setting settings[], const u8 settings_count,
+ u8 *setting_value)
+{
+ const char *sval;
+ int ret;
+ int i;
+
+ ret = of_property_read_string(node, setting_name, &sval);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < settings_count; ++i) {
+ const struct hub_setting * const setting = &settings[i];
+
+ if (!strcmp(setting->name, sval)) {
+ *setting_value = setting->value;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void i3c_hub_tp_of_get_setting(struct device *dev, const struct device_node *node,
+ struct tp_setting tp_setting[])
+{
+ struct device_node *tp_node;
+ int id;
+
+ for_each_available_child_of_node(node, tp_node) {
+ int ret;
+
+ if (!tp_node->name || of_node_cmp(tp_node->name, "target-port"))
+ continue;
+
+ if (!tp_node->full_name ||
+ (sscanf(tp_node->full_name, "target-port@%i", &id) != 1)) {
+ dev_warn(dev, "Invalid target port node found in DT - %s\n",
+ tp_node->full_name);
+ continue;
+ }
+
+ if (id >= I3C_HUB_TP_MAX_COUNT) {
+ dev_warn(dev, "Invalid target port index found in DT - %i\n", id);
+ continue;
+ }
+ ret = i3c_hub_of_get_setting(tp_node, "mode", tp_mode_settings,
+ ARRAY_SIZE(tp_mode_settings), &tp_setting[id].mode);
+ if (ret)
+ dev_warn(dev, "Invalid or not specified setting for target port[%i].mode\n",
+ id);
+
+ ret = i3c_hub_of_get_setting(tp_node, "pullup", tp_pullup_settings,
+ ARRAY_SIZE(tp_pullup_settings),
+ &tp_setting[id].pullup_en);
+ if (ret)
+ dev_warn(dev,
+ "Invalid or not specified setting for target port[%i].pullup\n",
+ id);
+ }
+}
+
+static void i3c_hub_of_get_configuration(struct device *dev, const struct device_node *node)
+{
+ struct i3c_hub *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = i3c_hub_of_get_setting(node, "cp0-ldo", ldo_settings, ARRAY_SIZE(ldo_settings),
+ &priv->settings.cp0_ldo);
+ if (ret)
+ dev_warn(dev, "Invalid or not specified setting for cp0-ldo\n");
+
+ ret = i3c_hub_of_get_setting(node, "cp1-ldo", ldo_settings, ARRAY_SIZE(ldo_settings),
+ &priv->settings.cp1_ldo);
+ if (ret)
+ dev_warn(dev, "Invalid or not specified setting for cp1-ldo\n");
+
+ ret = i3c_hub_of_get_setting(node, "tp0145-ldo", ldo_settings, ARRAY_SIZE(ldo_settings),
+ &priv->settings.tp0145_ldo);
+ if (ret)
+ dev_warn(dev, "Invalid or not specified setting for tp0145-ldo\n");
+
+ ret = i3c_hub_of_get_setting(node, "tp2367-ldo", ldo_settings, ARRAY_SIZE(ldo_settings),
+ &priv->settings.tp2367_ldo);
+ if (ret)
+ dev_warn(dev, "Invalid or not specified setting for tp2367-ldo\n");
+
+ ret = i3c_hub_of_get_setting(node, "tp0145-pullup", pullup_settings,
+ ARRAY_SIZE(pullup_settings), &priv->settings.tp0145_pullup);
+ if (ret)
+ dev_warn(dev, "Invalid or not specified setting for tp0145-pullup\n");
+
+ ret = i3c_hub_of_get_setting(node, "tp2367-pullup", pullup_settings,
+ ARRAY_SIZE(pullup_settings), &priv->settings.tp2367_pullup);
+ if (ret)
+ dev_warn(dev, "Invalid or not specified setting for tp2367-pullup\n");
+
+ i3c_hub_tp_of_get_setting(dev, node, priv->settings.tp);
+}
+
+static void i3c_hub_of_default_configuration(struct device *dev)
+{
+ struct i3c_hub *priv = dev_get_drvdata(dev);
+ int id;
+
+ priv->settings.cp0_ldo = I3C_HUB_DT_LDO_NOT_DEFINED;
+ priv->settings.cp1_ldo = I3C_HUB_DT_LDO_NOT_DEFINED;
+ priv->settings.tp0145_ldo = I3C_HUB_DT_LDO_NOT_DEFINED;
+ priv->settings.tp2367_ldo = I3C_HUB_DT_LDO_NOT_DEFINED;
+ priv->settings.tp0145_pullup = I3C_HUB_DT_PULLUP_NOT_DEFINED;
+ priv->settings.tp2367_pullup = I3C_HUB_DT_PULLUP_NOT_DEFINED;
+
+ for (id = 0; id < I3C_HUB_TP_MAX_COUNT; ++id) {
+ priv->settings.tp[id].mode = I3C_HUB_DT_TP_MODE_NOT_DEFINED;
+ priv->settings.tp[id].pullup_en = I3C_HUB_DT_TP_PULLUP_NOT_DEFINED;
+ }
+}
+
+static int i3c_hub_hw_configure_pullup(struct device *dev)
+{
+ struct i3c_hub *priv = dev_get_drvdata(dev);
+ u8 mask = 0, value = 0;
+
+ if (priv->settings.tp0145_pullup != I3C_HUB_DT_PULLUP_NOT_DEFINED) {
+ mask |= TP0145_PULLUP_CONF_MASK;
+ value |= TP0145_PULLUP_CONF(i3c_hub_pullup_dt_to_reg(priv->settings.tp0145_pullup));
+ }
+
+ if (priv->settings.tp2367_pullup != I3C_HUB_DT_PULLUP_NOT_DEFINED) {
+ mask |= TP2367_PULLUP_CONF_MASK;
+ value |= TP2367_PULLUP_CONF(i3c_hub_pullup_dt_to_reg(priv->settings.tp2367_pullup));
+ }
+
+ return regmap_update_bits(priv->regmap, I3C_HUB_LDO_AND_PULLUP_CONF, mask, value);
+}
+
+static int i3c_hub_hw_configure_ldo(struct device *dev)
+{
+ struct i3c_hub *priv = dev_get_drvdata(dev);
+ u8 mask_all = 0, val_all = 0;
+ u8 ldo_dis = 0, ldo_en = 0;
+ u32 reg_val;
+ u8 val;
+ int ret;
+
+ /* Get LDOs configuration to figure out what is going to be changed */
+ ret = regmap_read(priv->regmap, I3C_HUB_LDO_CONF, &reg_val);
+ if (ret)
+ return ret;
+
+ if (priv->settings.cp0_ldo != I3C_HUB_DT_LDO_NOT_DEFINED) {
+ val = CP0_LDO_VOLTAGE(i3c_hub_ldo_dt_to_reg(priv->settings.cp0_ldo));
+ if ((reg_val & CP0_LDO_VOLTAGE_MASK) != val)
+ ldo_dis |= CP0_LDO_EN;
+ if (priv->settings.cp0_ldo != I3C_HUB_DT_LDO_DISABLED)
+ ldo_en |= CP0_LDO_EN;
+ mask_all |= CP0_LDO_VOLTAGE_MASK;
+ val_all |= val;
+ }
+ if (priv->settings.cp1_ldo != I3C_HUB_DT_LDO_NOT_DEFINED) {
+ val = CP1_LDO_VOLTAGE(i3c_hub_ldo_dt_to_reg(priv->settings.cp1_ldo));
+ if ((reg_val & CP1_LDO_VOLTAGE_MASK) != val)
+ ldo_dis |= CP1_LDO_EN;
+ if (priv->settings.cp1_ldo != I3C_HUB_DT_LDO_DISABLED)
+ ldo_en |= CP1_LDO_EN;
+ mask_all |= CP1_LDO_VOLTAGE_MASK;
+ val_all |= val;
+ }
+ if (priv->settings.tp0145_ldo != I3C_HUB_DT_LDO_NOT_DEFINED) {
+ val = TP0145_LDO_VOLTAGE(i3c_hub_ldo_dt_to_reg(priv->settings.tp0145_ldo));
+ if ((reg_val & TP0145_LDO_VOLTAGE_MASK) != val)
+ ldo_dis |= TP0145_LDO_EN;
+ if (priv->settings.tp0145_ldo != I3C_HUB_DT_LDO_DISABLED)
+ ldo_en |= TP0145_LDO_EN;
+ mask_all |= TP0145_LDO_VOLTAGE_MASK;
+ val_all |= val;
+ }
+ if (priv->settings.tp2367_ldo != I3C_HUB_DT_LDO_NOT_DEFINED) {
+ val = TP2367_LDO_VOLTAGE(i3c_hub_ldo_dt_to_reg(priv->settings.tp2367_ldo));
+ if ((reg_val & TP2367_LDO_VOLTAGE_MASK) != val)
+ ldo_dis |= TP2367_LDO_EN;
+ if (priv->settings.tp2367_ldo != I3C_HUB_DT_LDO_DISABLED)
+ ldo_en |= TP2367_LDO_EN;
+ mask_all |= TP2367_LDO_VOLTAGE_MASK;
+ val_all |= val;
+ }
+
+ /* Disable all LDOs if LDO configuration is going to be changed. */
+ ret = regmap_update_bits(priv->regmap, I3C_HUB_LDO_AND_PULLUP_CONF, ldo_dis, 0);
+ if (ret)
+ return ret;
+
+ /* Set LDOs configuration */
+ ret = regmap_update_bits(priv->regmap, I3C_HUB_LDO_CONF, mask_all, val_all);
+ if (ret)
+ return ret;
+
+ /* Re-enable LDOs if needed */
+ return regmap_update_bits(priv->regmap, I3C_HUB_LDO_AND_PULLUP_CONF, ldo_en, ldo_en);
+}
+
+static int i3c_hub_hw_configure_tp(struct device *dev)
+{
+ struct i3c_hub *priv = dev_get_drvdata(dev);
+ u8 pullup_mask = 0, pullup_val = 0;
+ u8 smbus_mask = 0, smbus_val = 0;
+ u8 gpio_mask = 0, gpio_val = 0;
+ u8 i3c_mask = 0, i3c_val = 0;
+ int ret;
+ int i;
+
+ /* TBD: Read the HUB type from register I3C_HUB_DEV_INFO_0 to learn the target port count. */
+ for (i = 0; i < I3C_HUB_TP_MAX_COUNT; ++i) {
+ if (priv->settings.tp[i].mode != I3C_HUB_DT_TP_MODE_NOT_DEFINED) {
+ i3c_mask |= TPn_NET_CON(i);
+ smbus_mask |= TPn_SMBUS_MODE_EN(i);
+ gpio_mask |= TPn_GPIO_MODE_EN(i);
+
+ if (priv->settings.tp[i].mode == I3C_HUB_DT_TP_MODE_I3C)
+ i3c_val |= TPn_NET_CON(i);
+ else if (priv->settings.tp[i].mode == I3C_HUB_DT_TP_MODE_SMBUS)
+ smbus_val |= TPn_SMBUS_MODE_EN(i);
+ else if (priv->settings.tp[i].mode == I3C_HUB_DT_TP_MODE_GPIO)
+ gpio_val |= TPn_GPIO_MODE_EN(i);
+ }
+ if (priv->settings.tp[i].pullup_en != I3C_HUB_DT_TP_PULLUP_NOT_DEFINED) {
+ pullup_mask |= TPn_PULLUP_EN(i);
+ if (priv->settings.tp[i].pullup_en == I3C_HUB_DT_TP_PULLUP_ENABLED)
+ pullup_val |= TPn_PULLUP_EN(i);
+ }
+ }
+
+ ret = regmap_update_bits(priv->regmap, I3C_HUB_TP_NET_CON_CONF, i3c_mask, i3c_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(priv->regmap, I3C_HUB_TP_SMBUS_AGNT_EN, smbus_mask, smbus_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(priv->regmap, I3C_HUB_TP_GPIO_MODE_EN, gpio_mask, gpio_val);
+ if (ret)
+ return ret;
+
+ /* Enable the TPs that were configured above */
+ ret = regmap_update_bits(priv->regmap, I3C_HUB_TP_ENABLE, i3c_mask | smbus_mask | gpio_mask,
+ i3c_val | smbus_val | gpio_val);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->regmap, I3C_HUB_TP_PULLUP_EN, pullup_mask, pullup_val);
+}
+
+static int i3c_hub_configure_hw(struct device *dev)
+{
+ int ret;
+
+ ret = i3c_hub_hw_configure_pullup(dev);
+ if (ret)
+ return ret;
+
+ ret = i3c_hub_hw_configure_ldo(dev);
+ if (ret)
+ return ret;
+
+ return i3c_hub_hw_configure_tp(dev);
+}
+
+static const struct i3c_device_id i3c_hub_ids[] = {
+ I3C_CLASS(I3C_DCR_HUB, NULL),
+ { },
+};
+
+static int fops_access_reg_get(void *ctx, u64 *val)
+{
+ struct i3c_hub *priv = ctx;
+ u32 reg_val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, priv->reg_addr, &reg_val);
+ if (ret)
+ return ret;
+
+ *val = reg_val & 0xFF;
+ return 0;
+}
+
+static int fops_access_reg_set(void *ctx, u64 val)
+{
+ struct i3c_hub *priv = ctx;
+
+ return regmap_write(priv->regmap, priv->reg_addr, val & 0xFF);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_access_reg, fops_access_reg_get, fops_access_reg_set, "0x%llX\n");
+
+static int i3c_hub_debugfs_init(struct i3c_hub *priv, const char *hub_id)
+{
+ struct dentry *entry, *dt_conf_dir, *reg_dir;
+ int i;
+
+ entry = debugfs_create_dir(hub_id, NULL);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ priv->debug_dir = entry;
+
+ entry = debugfs_create_dir("dt-conf", priv->debug_dir);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ dt_conf_dir = entry;
+
+ debugfs_create_u8("cp0-ldo", 0400, dt_conf_dir, &priv->settings.cp0_ldo);
+ debugfs_create_u8("cp1-ldo", 0400, dt_conf_dir, &priv->settings.cp1_ldo);
+ debugfs_create_u8("tp0145-ldo", 0400, dt_conf_dir, &priv->settings.tp0145_ldo);
+ debugfs_create_u8("tp2367-ldo", 0400, dt_conf_dir, &priv->settings.tp2367_ldo);
+ debugfs_create_u8("tp0145-pullup", 0400, dt_conf_dir, &priv->settings.tp0145_pullup);
+ debugfs_create_u8("tp2367-pullup", 0400, dt_conf_dir, &priv->settings.tp2367_pullup);
+
+ for (i = 0; i < I3C_HUB_TP_MAX_COUNT; ++i) {
+ char file_name[32];
+
+ sprintf(file_name, "tp%i.mode", i);
+ debugfs_create_u8(file_name, 0400, dt_conf_dir, &priv->settings.tp[i].mode);
+ sprintf(file_name, "tp%i.pullup_en", i);
+ debugfs_create_u8(file_name, 0400, dt_conf_dir, &priv->settings.tp[i].pullup_en);
+ }
+
+ entry = debugfs_create_dir("reg", priv->debug_dir);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ reg_dir = entry;
+
+ entry = debugfs_create_file_unsafe("access", 0600, reg_dir, priv, &fops_access_reg);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ debugfs_create_u8("offset", 0600, reg_dir, &priv->reg_addr);
+
+ return 0;
+
+err_remove:
+ debugfs_remove_recursive(priv->debug_dir);
+ return PTR_ERR(entry);
+}
+
+static int i3c_hub_probe(struct i3c_device *i3cdev)
+{
+ struct regmap_config i3c_hub_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ struct device *dev = &i3cdev->dev;
+ struct device_node *node;
+ struct regmap *regmap;
+ struct i3c_hub *priv;
+ char hub_id[32];
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->i3cdev = i3cdev;
+ i3cdev_set_drvdata(i3cdev, priv);
+
+ sprintf(hub_id, "i3c-hub-%d-%llx", i3cdev->bus->id, i3cdev->desc->info.pid);
+ ret = i3c_hub_debugfs_init(priv, hub_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize debugfs\n");
+
+ i3c_hub_of_default_configuration(dev);
+
+ /* TBD: Support for multiple HUBs. */
+ /* Just get the first hub node from the DT */
+ node = of_get_child_by_name(dev->parent->of_node, "hub");
+ if (!node) {
+ dev_warn(dev, "Failed to find DT entry for the driver. Running with defaults.\n");
+ } else {
+ i3c_hub_of_get_configuration(dev, node);
+ of_node_put(node);
+ }
+
+ regmap = devm_regmap_init_i3c(i3cdev, &i3c_hub_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "Failed to register I3C HUB regmap\n");
+ goto error;
+ }
+
+ priv->regmap = regmap;
+
+ /* Unlock access to protected registers */
+ ret = regmap_write(priv->regmap, I3C_HUB_PROTECTION_CODE, REGISTERS_UNLOCK_CODE);
+ if (ret) {
+ dev_err(dev, "Failed to unlock HUB's protected registers\n");
+ goto error;
+ }
+
+ ret = i3c_hub_configure_hw(dev);
+ if (ret) {
+ dev_err(dev, "Failed to configure the HUB\n");
+ goto error;
+ }
+
+ /* Lock access to protected registers */
+ ret = regmap_write(priv->regmap, I3C_HUB_PROTECTION_CODE, REGISTERS_LOCK_CODE);
+ if (ret) {
+ dev_err(dev, "Failed to lock HUB's protected registers\n");
+ goto error;
+ }
+
+ /* TBD: Apply special/security lock here using DEV_CMD register */
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(priv->debug_dir);
+ return ret;
+}
+
+static void i3c_hub_remove(struct i3c_device *i3cdev)
+{
+ struct i3c_hub *priv = i3cdev_get_drvdata(i3cdev);
+
+ debugfs_remove_recursive(priv->debug_dir);
+}
+
+static struct i3c_driver i3c_hub = {
+ .driver.name = "i3c-hub",
+ .id_table = i3c_hub_ids,
+ .probe = i3c_hub_probe,
+ .remove = i3c_hub_remove,
+};
+
+module_i3c_driver(i3c_hub);
+
+MODULE_AUTHOR("Zbigniew Lukwinski <zbigniew.lukwinski@linux.intel.com>");
+MODULE_DESCRIPTION("I3C HUB driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/i3cdev.c b/drivers/i3c/i3cdev.c
new file mode 100644
index 000000000000..fd8f2695d775
--- /dev/null
+++ b/drivers/i3c/i3cdev.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
+ *
+ * Author: Vitor Soares <soares@synopsys.com>
+ */
+
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/i3c/i3cdev.h>
+
+#include "internals.h"
+
+struct i3cdev_data {
+ struct list_head list;
+ struct i3c_device *i3c;
+ struct cdev cdev;
+ struct device *dev;
+ int id;
+};
+
+static DEFINE_IDA(i3cdev_ida);
+static dev_t i3cdev_number;
+#define I3C_MINORS 32 /* 32 I3C devices supported for now */
+
+static LIST_HEAD(i3cdev_list);
+static DEFINE_SPINLOCK(i3cdev_list_lock);
+
+static struct i3cdev_data *i3cdev_get_by_i3c(struct i3c_device *i3c)
+{
+ struct i3cdev_data *i3cdev;
+
+ spin_lock(&i3cdev_list_lock);
+ list_for_each_entry(i3cdev, &i3cdev_list, list) {
+ if (i3cdev->i3c == i3c)
+ goto found;
+ }
+
+ i3cdev = NULL;
+
+found:
+ spin_unlock(&i3cdev_list_lock);
+ return i3cdev;
+}
+
+static struct i3cdev_data *get_free_i3cdev(struct i3c_device *i3c)
+{
+ struct i3cdev_data *i3cdev;
+ int id;
+
+ id = ida_simple_get(&i3cdev_ida, 0, I3C_MINORS, GFP_KERNEL);
+ if (id < 0) {
+ pr_err("i3cdev: no minor number available!\n");
+ return ERR_PTR(id);
+ }
+
+ i3cdev = kzalloc(sizeof(*i3cdev), GFP_KERNEL);
+ if (!i3cdev) {
+ ida_simple_remove(&i3cdev_ida, id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ i3cdev->i3c = i3c;
+ i3cdev->id = id;
+
+ spin_lock(&i3cdev_list_lock);
+ list_add_tail(&i3cdev->list, &i3cdev_list);
+ spin_unlock(&i3cdev_list_lock);
+
+ return i3cdev;
+}
+
+static void put_i3cdev(struct i3cdev_data *i3cdev)
+{
+ spin_lock(&i3cdev_list_lock);
+ list_del(&i3cdev->list);
+ spin_unlock(&i3cdev_list_lock);
+ kfree(i3cdev);
+}
+
+static ssize_t
+i3cdev_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct i3c_device *i3c = file->private_data;
+ struct i3c_priv_xfer xfers = {
+ .rnw = true,
+ .len = count,
+ };
+ char *tmp;
+ int ret;
+
+ tmp = kzalloc(count, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ xfers.data.in = tmp;
+
+ dev_dbg(&i3c->dev, "Reading %zu bytes.\n", count);
+
+ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
+ if (!ret)
+ ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
+
+ kfree(tmp);
+ return ret;
+}
+
+static ssize_t
+i3cdev_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct i3c_device *i3c = file->private_data;
+ struct i3c_priv_xfer xfers = {
+ .rnw = false,
+ .len = count,
+ };
+ char *tmp;
+ int ret;
+
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ xfers.data.out = tmp;
+
+ dev_dbg(&i3c->dev, "Writing %zu bytes.\n", count);
+
+ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
+ kfree(tmp);
+ return (!ret) ? count : ret;
+}
+
+static int
+i3cdev_do_priv_xfer(struct i3c_device *dev, struct i3c_ioc_priv_xfer *xfers,
+ unsigned int nxfers)
+{
+ struct i3c_priv_xfer *k_xfers;
+ u8 **data_ptrs;
+ int i, ret = 0;
+
+ k_xfers = kcalloc(nxfers, sizeof(*k_xfers), GFP_KERNEL);
+ if (!k_xfers)
+ return -ENOMEM;
+
+ data_ptrs = kcalloc(nxfers, sizeof(*data_ptrs), GFP_KERNEL);
+ if (!data_ptrs) {
+ ret = -ENOMEM;
+ goto err_free_k_xfer;
+ }
+
+ for (i = 0; i < nxfers; i++) {
+ data_ptrs[i] = memdup_user((const u8 __user *)
+ (uintptr_t)xfers[i].data,
+ xfers[i].len);
+ if (IS_ERR(data_ptrs[i])) {
+ ret = PTR_ERR(data_ptrs[i]);
+ break;
+ }
+
+ k_xfers[i].len = xfers[i].len;
+ if (xfers[i].rnw) {
+ k_xfers[i].rnw = true;
+ k_xfers[i].data.in = data_ptrs[i];
+ } else {
+ k_xfers[i].rnw = false;
+ k_xfers[i].data.out = data_ptrs[i];
+ }
+ }
+
+ if (ret < 0)
+ goto err_free_mem;
+
+ ret = i3c_device_do_priv_xfers(dev, k_xfers, nxfers);
+ if (ret)
+ goto err_free_mem;
+
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].rnw) {
+ if (copy_to_user((void __user *)(uintptr_t)xfers[i].data,
+ data_ptrs[i], xfers[i].len))
+ ret = -EFAULT;
+ }
+ }
+
+err_free_mem:
+ while (i-- > 0)
+ kfree(data_ptrs[i]);
+ kfree(data_ptrs);
+err_free_k_xfer:
+ kfree(k_xfers);
+ return ret;
+}
+
+static struct i3c_ioc_priv_xfer *
+i3cdev_get_ioc_priv_xfer(unsigned int cmd, struct i3c_ioc_priv_xfer *u_xfers,
+ unsigned int *nxfers)
+{
+ u32 tmp = _IOC_SIZE(cmd);
+
+ if ((tmp % sizeof(struct i3c_ioc_priv_xfer)) != 0)
+ return ERR_PTR(-EINVAL);
+
+ *nxfers = tmp / sizeof(struct i3c_ioc_priv_xfer);
+ if (*nxfers == 0)
+ return NULL;
+
+ return memdup_user(u_xfers, tmp);
+}
+
+static int
+i3cdev_ioc_priv_xfer(struct i3c_device *i3c, unsigned int cmd,
+ struct i3c_ioc_priv_xfer *u_xfers)
+{
+ struct i3c_ioc_priv_xfer *k_xfers;
+ unsigned int nxfers;
+ int ret;
+
+ k_xfers = i3cdev_get_ioc_priv_xfer(cmd, u_xfers, &nxfers);
+ if (IS_ERR_OR_NULL(k_xfers))
+ return PTR_ERR(k_xfers);
+
+ ret = i3cdev_do_priv_xfer(i3c, k_xfers, nxfers);
+
+ kfree(k_xfers);
+
+ return ret;
+}
+
+static long
+i3cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct i3c_device *i3c = file->private_data;
+
+ dev_dbg(&i3c->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", cmd, arg);
+
+ if (_IOC_TYPE(cmd) != I3C_DEV_IOC_MAGIC)
+ return -ENOTTY;
+
+ /* Check command number and direction */
+ if (_IOC_NR(cmd) == _IOC_NR(I3C_IOC_PRIV_XFER(0)) &&
+ _IOC_DIR(cmd) == (_IOC_READ | _IOC_WRITE))
+ return i3cdev_ioc_priv_xfer(i3c, cmd,
+ (struct i3c_ioc_priv_xfer __user *)arg);
+
+ return 0;
+}
+
+static int i3cdev_open(struct inode *inode, struct file *file)
+{
+ struct i3cdev_data *i3cdev = container_of(inode->i_cdev,
+ struct i3cdev_data,
+ cdev);
+
+ file->private_data = i3cdev->i3c;
+
+ return 0;
+}
+
+static int i3cdev_release(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations i3cdev_fops = {
+ .owner = THIS_MODULE,
+ .read = i3cdev_read,
+ .write = i3cdev_write,
+ .unlocked_ioctl = i3cdev_ioctl,
+ .open = i3cdev_open,
+ .release = i3cdev_release,
+};
+
+/* ------------------------------------------------------------------------- */
+
+static struct class *i3cdev_class;
+
+static int i3cdev_attach(struct device *dev, void *dummy)
+{
+ struct i3cdev_data *i3cdev;
+ struct i3c_device *i3c;
+ int res;
+
+ if (dev->type == &i3c_masterdev_type || dev->driver)
+ return 0;
+
+ i3c = dev_to_i3cdev(dev);
+
+ /* Get a device */
+ i3cdev = get_free_i3cdev(i3c);
+ if (IS_ERR(i3cdev))
+ return PTR_ERR(i3cdev);
+
+ cdev_init(&i3cdev->cdev, &i3cdev_fops);
+ i3cdev->cdev.owner = THIS_MODULE;
+ res = cdev_add(&i3cdev->cdev,
+ MKDEV(MAJOR(i3cdev_number), i3cdev->id), 1);
+ if (res)
+ goto error_cdev;
+
+ /* register this i3c device with the driver core */
+ i3cdev->dev = device_create(i3cdev_class, &i3c->dev,
+ MKDEV(MAJOR(i3cdev_number), i3cdev->id),
+ NULL, "i3c-%s", dev_name(&i3c->dev));
+ if (IS_ERR(i3cdev->dev)) {
+ res = PTR_ERR(i3cdev->dev);
+ goto error;
+ }
+ pr_debug("i3cdev: I3C device [%s] registered as minor %d\n",
+ dev_name(&i3c->dev), i3cdev->id);
+ return 0;
+
+error:
+ cdev_del(&i3cdev->cdev);
+error_cdev:
+ put_i3cdev(i3cdev);
+ return res;
+}
+
+static int i3cdev_detach(struct device *dev, void *dummy)
+{
+ struct i3cdev_data *i3cdev;
+ struct i3c_device *i3c;
+
+ if (dev->type == &i3c_masterdev_type)
+ return 0;
+
+ i3c = dev_to_i3cdev(dev);
+
+ i3cdev = i3cdev_get_by_i3c(i3c);
+ if (!i3cdev)
+ return 0;
+
+ cdev_del(&i3cdev->cdev);
+ device_destroy(i3cdev_class, MKDEV(MAJOR(i3cdev_number), i3cdev->id));
+ ida_simple_remove(&i3cdev_ida, i3cdev->id);
+ put_i3cdev(i3cdev);
+
+ pr_debug("i3cdev: device [%s] unregistered\n", dev_name(&i3c->dev));
+
+ return 0;
+}
+
+static int i3cdev_notifier_call(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ return i3cdev_attach(dev, NULL);
+ case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_BOUND_DRIVER:
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ return i3cdev_detach(dev, NULL);
+ }
+
+ return 0;
+}
+
+static struct notifier_block i3c_notifier = {
+ .notifier_call = i3cdev_notifier_call,
+};
+
+static int __init i3cdev_init(void)
+{
+ int res;
+
+ /* Dynamically request unused major number */
+ res = alloc_chrdev_region(&i3cdev_number, 0, I3C_MINORS, "i3c");
+ if (res)
+ goto out;
+
+ /* Create a class to populate sysfs entries */
+ i3cdev_class = class_create(THIS_MODULE, "i3cdev");
+ if (IS_ERR(i3cdev_class)) {
+ res = PTR_ERR(i3cdev_class);
+ goto out_unreg_chrdev;
+ }
+
+ /* Keep track of busses which have devices to add or remove later */
+ res = bus_register_notifier(&i3c_bus_type, &i3c_notifier);
+ if (res)
+ goto out_unreg_class;
+
+ /* Bind to already existing devices without a driver right away */
+ i3c_for_each_dev(NULL, i3cdev_attach);
+
+ return 0;
+
+out_unreg_class:
+ class_destroy(i3cdev_class);
+out_unreg_chrdev:
+ unregister_chrdev_region(i3cdev_number, I3C_MINORS);
+out:
+ pr_err("%s: Driver Initialisation failed\n", __FILE__);
+ return res;
+}
+
+static void __exit i3cdev_exit(void)
+{
+ bus_unregister_notifier(&i3c_bus_type, &i3c_notifier);
+ i3c_for_each_dev(NULL, i3cdev_detach);
+ class_destroy(i3cdev_class);
+ unregister_chrdev_region(i3cdev_number, I3C_MINORS);
+}
+
+MODULE_AUTHOR("Vitor Soares <soares@synopsys.com>");
+MODULE_DESCRIPTION("I3C /dev entries driver");
+MODULE_LICENSE("GPL");
+
+module_init(i3cdev_init);
+module_exit(i3cdev_exit);
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 86b7b44cfca2..524ad47fd916 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -9,8 +9,10 @@
#define I3C_INTERNALS_H
#include <linux/i3c/master.h>
+#include <linux/i3c/target.h>
extern struct bus_type i3c_bus_type;
+extern const struct device_type i3c_masterdev_type;
void i3c_bus_normaluse_lock(struct i3c_bus *bus);
void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
@@ -23,4 +25,7 @@ int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_getstatus_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info);
+int i3c_for_each_dev(void *data, int (*fn)(struct device *, void *));
+int i3c_dev_generate_ibi_locked(struct i3c_dev_desc *dev, const u8 *data, int len);
#endif /* I3C_INTERNAL_H */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index dfe18dcd008d..656f0398d3e5 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -298,19 +298,24 @@ static const struct device_type i3c_device_type = {
.uevent = i3c_device_uevent,
};
+const struct device_type i3c_target_device_type = {
+};
+
static int i3c_device_match(struct device *dev, struct device_driver *drv)
{
struct i3c_device *i3cdev;
struct i3c_driver *i3cdrv;
- if (dev->type != &i3c_device_type)
+ if (dev->type != &i3c_device_type && dev->type != &i3c_target_device_type)
return 0;
i3cdev = dev_to_i3cdev(dev);
i3cdrv = drv_to_i3cdrv(drv);
- if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
- return 1;
+ if ((dev->type == &i3c_device_type && !i3cdrv->target) ||
+ (dev->type == &i3c_target_device_type && i3cdrv->target))
+ if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
+ return 1;
return 0;
}
@@ -330,7 +335,8 @@ static void i3c_device_remove(struct device *dev)
if (driver->remove)
driver->remove(i3cdev);
- i3c_device_free_ibi(i3cdev);
+ if (!driver->target)
+ i3c_device_free_ibi(i3cdev);
}
struct bus_type i3c_bus_type = {
@@ -339,6 +345,7 @@ struct bus_type i3c_bus_type = {
.probe = i3c_device_probe,
.remove = i3c_device_remove,
};
+EXPORT_SYMBOL_GPL(i3c_bus_type);
static enum i3c_addr_slot_status
i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
@@ -542,9 +549,10 @@ static void i3c_masterdev_release(struct device *dev)
of_node_put(dev->of_node);
}
-static const struct device_type i3c_masterdev_type = {
+const struct device_type i3c_masterdev_type = {
.groups = i3c_masterdev_groups,
};
+EXPORT_SYMBOL_GPL(i3c_masterdev_type);
static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
unsigned long max_i2c_scl_rate)
@@ -989,6 +997,21 @@ static int i3c_master_setda_locked(struct i3c_master_controller *master,
return ret;
}
+static int i3c_master_setaasa_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_SETAASA, &dest, 1);
+
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
static int i3c_master_setdasa_locked(struct i3c_master_controller *master,
u8 static_addr, u8 dyn_addr)
{
@@ -1001,6 +1024,26 @@ static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
return i3c_master_setda_locked(master, oldaddr, newaddr, false);
}
+static int i3c_master_sethid_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ struct i3c_ccc_sethid *sethid;
+ int ret;
+
+ sethid = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 1);
+ if (!sethid)
+ return -ENOMEM;
+
+ sethid->hid = 0;
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_SETHID, &dest, 1);
+
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
struct i3c_device_info *info)
{
@@ -1220,6 +1263,32 @@ out:
return ret;
}
+int i3c_dev_getstatus_locked(struct i3c_dev_desc *dev,
+ struct i3c_device_info *info)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_ccc_getstatus *getsts;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getsts = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getsts));
+ if (!getsts)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETSTATUS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->status = getsts->status;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *master = i3c_dev_get_master(dev);
@@ -1235,6 +1304,11 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
slot_status == I3C_ADDR_SLOT_I2C_DEV)
return -EINVAL;
+ if (master->jdec_spd) {
+ dev->info.pid = dev->boardinfo->pid;
+ return 0;
+ }
+
ret = i3c_master_getpid_locked(master, &dev->info);
if (ret)
return ret;
@@ -1450,13 +1524,20 @@ static int i3c_master_early_i3c_dev_add(struct i3c_master_controller *master,
if (ret)
goto err_free_dev;
- ret = i3c_master_setdasa_locked(master, i3cdev->info.static_addr,
+ if (master->jdec_spd) {
+ i3cdev->info.dyn_addr = i3cdev->boardinfo->init_dyn_addr;
+ ret = i3c_master_reattach_i3c_dev(i3cdev,
+ i3cdev->info.static_addr);
+ } else {
+ ret = i3c_master_setdasa_locked(master,
+ i3cdev->info.static_addr,
i3cdev->boardinfo->init_dyn_addr);
- if (ret)
- goto err_detach_dev;
+ if (ret)
+ goto err_detach_dev;
- i3cdev->info.dyn_addr = i3cdev->boardinfo->init_dyn_addr;
- ret = i3c_master_reattach_i3c_dev(i3cdev, 0);
+ i3cdev->info.dyn_addr = i3cdev->boardinfo->init_dyn_addr;
+ ret = i3c_master_reattach_i3c_dev(i3cdev, 0);
+ }
if (ret)
goto err_rstdaa;
@@ -1531,9 +1612,14 @@ int i3c_master_do_daa(struct i3c_master_controller *master)
{
int ret;
- i3c_bus_maintenance_lock(&master->bus);
- ret = master->ops->do_daa(master);
- i3c_bus_maintenance_unlock(&master->bus);
+ if (master->jdec_spd) {
+ ret = i3c_master_sethid_locked(master);
+ ret = i3c_master_setaasa_locked(master);
+ } else {
+ i3c_bus_maintenance_lock(&master->bus);
+ ret = master->ops->do_daa(master);
+ i3c_bus_maintenance_unlock(&master->bus);
+ }
if (ret)
return ret;
@@ -1671,7 +1757,7 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
struct i2c_dev_boardinfo *i2cboardinfo;
struct i3c_dev_boardinfo *i3cboardinfo;
struct i2c_dev_desc *i2cdev;
- int ret;
+ int ret, n_i3cdev = 0;
/*
* First attach all devices with static definitions provided by the
@@ -1759,9 +1845,10 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_rstdaa;
}
- i3c_bus_set_addr_slot_status(&master->bus,
- i3cboardinfo->init_dyn_addr,
- I3C_ADDR_SLOT_I3C_DEV);
+ if (i3cboardinfo->static_addr != i3cboardinfo->init_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
/*
* Only try to create/attach devices that have a static
@@ -1773,8 +1860,17 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
if (i3cboardinfo->static_addr)
i3c_master_early_i3c_dev_add(master, i3cboardinfo);
+
+ n_i3cdev++;
}
+ /*
+ * SPD devices all have static addresses, so don't do DAA if we know
+ * there are no I3C devices on the bus (i.e. it is a pure I2C bus).
+ */
+ if (master->jdec_spd && n_i3cdev == 0)
+ return 0;
+
ret = i3c_master_do_daa(master);
if (ret)
goto err_rstdaa;
@@ -2101,17 +2197,29 @@ static int of_populate_i3c_bus(struct i3c_master_controller *master)
struct device *dev = &master->dev;
struct device_node *i3cbus_np = dev->of_node;
struct device_node *node;
- int ret;
+ int ret, i;
u32 val;
if (!i3cbus_np)
return 0;
+ if (of_get_property(i3cbus_np, "jdec-spd", NULL))
+ master->jdec_spd = 1;
+
+ /* For SPD bus, undo unnecessary address reservations. */
+ if (master->jdec_spd) {
+ for (i = 0; i < 7; i++)
+ i3c_bus_set_addr_slot_status(&master->bus, I3C_BROADCAST_ADDR ^ BIT(i),
+ I3C_ADDR_SLOT_FREE);
+ }
+
for_each_available_child_of_node(i3cbus_np, node) {
- ret = of_i3c_master_add_dev(master, node);
- if (ret) {
- of_node_put(node);
- return ret;
+ if (node->name && of_node_cmp(node->name, "hub")) {
+ ret = of_i3c_master_add_dev(master, node);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
}
}
@@ -2591,6 +2699,211 @@ int i3c_master_unregister(struct i3c_master_controller *master)
}
EXPORT_SYMBOL_GPL(i3c_master_unregister);
+static int i3c_target_bus_init(struct i3c_master_controller *master)
+{
+ return master->target_ops->bus_init(master);
+}
+
+static void i3c_target_bus_cleanup(struct i3c_master_controller *master)
+{
+ if (master->target_ops->bus_cleanup)
+ master->target_ops->bus_cleanup(master);
+}
+
+static void i3c_targetdev_release(struct device *dev)
+{
+ struct i3c_master_controller *master = container_of(dev, struct i3c_master_controller, dev);
+ struct i3c_bus *bus = &master->bus;
+
+ mutex_lock(&i3c_core_lock);
+ idr_remove(&i3c_bus_idr, bus->id);
+ mutex_unlock(&i3c_core_lock);
+
+ of_node_put(dev->of_node);
+}
+
+static void i3c_target_device_release(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_dev_desc *desc = i3cdev->desc;
+
+ kfree(i3cdev);
+ kfree(desc);
+}
+
+static void
+i3c_target_register_new_i3c_dev(struct i3c_master_controller *master, struct i3c_device_info info)
+{
+ struct i3c_dev_desc *desc;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return;
+
+ desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
+ if (!desc->dev) {
+ kfree(desc);
+ return;
+ }
+
+ desc->dev->bus = &master->bus;
+ desc->dev->desc = desc;
+ desc->dev->dev.parent = &master->dev;
+ desc->dev->dev.type = &i3c_target_device_type;
+ desc->dev->dev.bus = &i3c_bus_type;
+ desc->dev->dev.release = i3c_target_device_release;
+ desc->info = info;
+ desc->common.master = master;
+ dev_set_name(&desc->dev->dev, "%d-target", master->bus.id);
+
+ ret = device_register(&desc->dev->dev);
+ if (ret)
+ dev_err(&master->dev, "Failed to add I3C target device (err = %d)\n", ret);
+
+ master->this = desc;
+}
+
+static void i3c_target_unregister_i3c_dev(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev = master->this;
+
+ if (device_is_registered(&i3cdev->dev->dev))
+ device_unregister(&i3cdev->dev->dev);
+ else
+ put_device(&i3cdev->dev->dev);
+}
+
+static void i3c_target_read_device_info(struct device_node *np, struct i3c_device_info *info)
+{
+ u64 pid;
+ u32 dcr;
+ int ret;
+
+ ret = of_property_read_u64(np, "pid", &pid);
+ if (ret)
+ info->pid = 0;
+ else
+ info->pid = pid;
+
+ ret = of_property_read_u32(np, "dcr", &dcr);
+ if (ret)
+		info->dcr = 0;
+ else
+ info->dcr = dcr;
+}
+
+static int i3c_target_check_ops(const struct i3c_target_ops *ops)
+{
+ if (!ops || !ops->bus_init)
+ return -EINVAL;
+
+ return 0;
+}
+
+int i3c_target_register(struct i3c_master_controller *master, struct device *parent,
+ const struct i3c_target_ops *ops)
+{
+ struct i3c_bus *i3cbus = i3c_master_get_bus(master);
+ struct i3c_device_info info;
+ int ret;
+
+ ret = i3c_target_check_ops(ops);
+ if (ret)
+ return ret;
+
+ master->dev.parent = parent;
+ master->dev.of_node = of_node_get(parent->of_node);
+ master->dev.bus = &i3c_bus_type;
+ master->dev.release = i3c_targetdev_release;
+ master->target_ops = ops;
+ i3cbus->mode = I3C_BUS_MODE_PURE;
+
+ init_rwsem(&i3cbus->lock);
+ mutex_lock(&i3c_core_lock);
+ ret = idr_alloc(&i3c_bus_idr, i3cbus, 0, 0, GFP_KERNEL);
+ mutex_unlock(&i3c_core_lock);
+ if (ret < 0)
+ return ret;
+ i3cbus->id = ret;
+
+ device_initialize(&master->dev);
+ dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+
+ ret = device_add(&master->dev);
+ if (ret)
+ goto err_put_device;
+
+ i3c_target_read_device_info(master->dev.of_node, &info);
+
+ i3c_target_register_new_i3c_dev(master, info);
+
+ ret = i3c_target_bus_init(master);
+ if (ret)
+ goto err_cleanup_bus;
+
+ return 0;
+
+err_cleanup_bus:
+ i3c_target_bus_cleanup(master);
+
+err_put_device:
+ put_device(&master->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_target_register);
+
+int i3c_target_unregister(struct i3c_master_controller *master)
+{
+ i3c_target_unregister_i3c_dev(master);
+ i3c_target_bus_cleanup(master);
+ device_unregister(&master->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_target_unregister);
+
+int i3c_target_read_register(struct i3c_device *dev, const struct i3c_target_read_setup *setup)
+{
+ dev->desc->target_info.read_handler = setup->handler;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_target_read_register);
+
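For illustration only, a target-mode consumer would plug into the hook registered above roughly as follows. This is a sketch: the exact handler prototype is an assumption here, inferred from how dw_i3c_target_handle_response_ready() later calls read_handler(desc->dev, buf, nbytes).

	/* Hypothetical consumer; the handler signature is an assumption. */
	static void example_read_handler(struct i3c_device *dev, const void *data, size_t len)
	{
		dev_info(i3cdev_to_dev(dev), "received %zu bytes from the active controller\n", len);
	}

	static int example_setup_target_read(struct i3c_device *dev)
	{
		struct i3c_target_read_setup setup = {
			.handler = example_read_handler,
		};

		return i3c_target_read_register(dev, &setup);
	}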
+int i3c_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *master_ops,
+ const struct i3c_target_ops *target_ops,
+ bool secondary)
+{
+ const char *role;
+ int ret;
+
+ ret = of_property_read_string(parent->of_node, "initial-role", &role);
+ if (ret || !strcmp("primary", role)) {
+ return i3c_master_register(master, parent, master_ops, secondary);
+ } else if (!strcmp("target", role)) {
+ master->target = true;
+ return i3c_target_register(master, parent, target_ops);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(i3c_register);
+
+int i3c_unregister(struct i3c_master_controller *master)
+{
+ if (master->target)
+ i3c_target_unregister(master);
+ else
+ i3c_master_unregister(master);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_unregister);
+
int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *xfers,
int nxfers)
@@ -2604,10 +2917,38 @@ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
if (!master || !xfers)
return -EINVAL;
- if (!master->ops->priv_xfers)
- return -ENOTSUPP;
+ if (!master->target) {
+ if (!master->ops->priv_xfers)
+ return -EOPNOTSUPP;
+
+ return master->ops->priv_xfers(dev, xfers, nxfers);
+ }
+
+ if (!master->target_ops->priv_xfers)
+ return -EOPNOTSUPP;
- return master->ops->priv_xfers(dev, xfers, nxfers);
+ return master->target_ops->priv_xfers(dev, xfers, nxfers);
+}
+
+int i3c_dev_generate_ibi_locked(struct i3c_dev_desc *dev, const u8 *data, int len)
+{
+ struct i3c_master_controller *master;
+
+ if (!dev)
+ return -ENOENT;
+
+ master = i3c_dev_get_master(dev);
+ if (!master)
+ return -EINVAL;
+
+ if (!master->target)
+ return -EINVAL;
+
+ if (!master->target_ops->generate_ibi)
+ return -EOPNOTSUPP;
+
+ return master->target_ops->generate_ibi(dev, data, len);
}
int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
@@ -2695,6 +3036,18 @@ void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
dev->ibi = NULL;
}
+int i3c_for_each_dev(void *data, int (*fn)(struct device *, void *))
+{
+ int res;
+
+ mutex_lock(&i3c_core_lock);
+ res = bus_for_each_dev(&i3c_bus_type, NULL, data, fn);
+ mutex_unlock(&i3c_core_lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(i3c_for_each_dev);
+
static int __init i3c_init(void)
{
return bus_register(&i3c_bus_type);
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 51a8608203de..f67ff56febc8 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
+#include <linux/i3c/target.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/iopoll.h>
@@ -21,11 +22,13 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#define CCC_WORKAROUND
#define DEVICE_CTRL 0x0
#define DEV_CTRL_ENABLE BIT(31)
#define DEV_CTRL_RESUME BIT(30)
#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
#define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)
+#define DEV_CTRL_IBI_DATA_EN BIT(1)
#define DEVICE_ADDR 0x4
#define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
@@ -74,12 +77,31 @@
#define RX_TX_DATA_PORT 0x14
#define IBI_QUEUE_STATUS 0x18
+#define IBI_QUEUE_STATUS_RSP_NACK BIT(31)
+#define IBI_QUEUE_STATUS_PEC_ERR BIT(30)
+#define IBI_QUEUE_STATUS_IBI_ID(x) (((x) & GENMASK(15, 8)) >> 8)
+#define IBI_QUEUE_STATUS_DATA_LEN(x) ((x) & GENMASK(7, 0))
+
+#define IBI_QUEUE_IBI_ADDR(x) (IBI_QUEUE_STATUS_IBI_ID(x) >> 1)
+#define IBI_QUEUE_IBI_RNW(x) (IBI_QUEUE_STATUS_IBI_ID(x) & BIT(0))
+#define IBI_TYPE_SIR(x) \
+ ({ typeof(x) x_ = (x); \
+ (IBI_QUEUE_IBI_ADDR(x_) != I3C_HOT_JOIN_ADDR) && IBI_QUEUE_IBI_RNW(x_); })
+
+#define IBI_QUEUE_DATA 0x18
+#define IBI_QUEUE_DATA_STATUS_MASK GENMASK(31, 28)
+#define IBI_QUEUE_DATA_PAYLOAD_MASK GENMASK(15, 8)
#define QUEUE_THLD_CTRL 0x1c
+#define QUEUE_THLD_CTRL_IBI_STA_MASK GENMASK(31, 24)
+#define QUEUE_THLD_CTRL_IBI_STA(x) (((x) - 1) << 24)
+#define QUEUE_THLD_CTRL_IBI_DAT_MASK GENMASK(23, 16)
+#define QUEUE_THLD_CTRL_IBI_DAT(x) ((x) << 16)
#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)
#define DATA_BUFFER_THLD_CTRL 0x20
-#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)
+#define DATA_BUFFER_THLD_TX_START GENMASK(18, 16)
+#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(10, 8)
#define IBI_QUEUE_CTRL 0x24
#define IBI_MR_REQ_REJECT 0x2C
@@ -95,6 +117,8 @@
#define RESET_CTRL_SOFT BIT(0)
#define SLV_EVENT_CTRL 0x38
+#define SLV_EVENT_CTRL_SIR_EN BIT(0)
+
#define INTR_STATUS 0x3c
#define INTR_STATUS_EN 0x40
#define INTR_SIGNAL_EN 0x44
@@ -125,10 +149,14 @@
INTR_IBI_THLD_STAT | \
INTR_TX_THLD_STAT | \
INTR_RX_THLD_STAT)
-
#define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \
INTR_RESP_READY_STAT)
+#define INTR_TARGET_MASK (INTR_READ_REQ_RECV_STAT | \
+ INTR_RESP_READY_STAT | \
+ INTR_IBI_UPDATED_STAT | \
+ INTR_TRANSFER_ERR_STAT)
+
#define QUEUE_STATUS_LEVEL 0x4c
#define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
#define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
@@ -146,14 +174,36 @@
#define DEV_CHAR_TABLE_POINTER 0x60
#define VENDOR_SPECIFIC_REG_POINTER 0x6c
+#define SLV_MIPI_ID_VALUE 0x70
#define SLV_PID_VALUE 0x74
+#define SLV_PID_HI(x) (((x) >> 32) & GENMASK(15, 0))
+#define SLV_PID_LO(x) ((x) & GENMASK(31, 0))
#define SLV_CHAR_CTRL 0x78
+#define SLV_DCR_MASK GENMASK(15, 8)
+#define SLV_DCR(x) (((x) << 8) & SLV_DCR_MASK)
+#define SLV_DEVICE_ROLE_MASK GENMASK(7, 6)
+#define SLV_DEVICE_ROLE(x) (((x) << 6) & SLV_DEVICE_ROLE_MASK)
+#define SLV_HDR_CAPABLE BIT(5)
+#define SLV_MAX_DATA_SPEED_LIMIT BIT(0)
+
#define SLV_MAX_LEN 0x7c
+#define SLV_MAX_RD_LEN(x) (((x) & GENMASK(31, 16)) >> 16)
+#define SLV_MAX_WR_LEN(x) ((x) & GENMASK(15, 0))
+
#define MAX_READ_TURNAROUND 0x80
#define MAX_DATA_SPEED 0x84
#define SLV_DEBUG_STATUS 0x88
#define SLV_INTR_REQ 0x8c
+#define SLV_INTR_REQ_IBI_STS(x) (((x) & GENMASK(9, 8)) >> 8)
+#define IBI_STS_ACCEPTED 0x01
+#define IBI_STS_NOT_ATTEMPTED 0x11
+
#define DEVICE_CTRL_EXTENDED 0xb0
+#define DEVICE_CTRL_EXTENDED_MODE_MASK GENMASK(1, 0)
+#define DEVICE_CTRL_EXTENDED_MODE(x) ((x) & DEVICE_CTRL_EXTENDED_MODE_MASK)
+#define DEV_OPERATION_MODE_CONTROLLER 0x00
+#define DEV_OPERATION_MODE_TARGET 0x01
+
#define SCL_I3C_OD_TIMING 0xb4
#define SCL_I3C_PP_TIMING 0xb8
#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
@@ -175,16 +225,33 @@
#define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))
#define SCL_EXT_TERMN_LCNT_TIMING 0xcc
+
+#define SDA_HOLD_SWITCH_DLY_TIMING 0xd0
+#define SDA_TX_HOLD(x) (((x) << 16) & GENMASK(18, 16))
+#define SDA_TX_HOLD_MIN 1
+#define SDA_TX_HOLD_MAX 7
+
#define BUS_FREE_TIMING 0xd4
+#define BUS_AVAIL_TIME(x) (((x) << 16) & GENMASK(31, 16))
+#define MAX_BUS_AVAIL_CNT 0xffffU
#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))
#define BUS_IDLE_TIMING 0xd8
#define I3C_VER_ID 0xe0
#define I3C_VER_TYPE 0xe4
+#define I3C_VER_RELEASE_TYPE(x) (((x) & GENMASK(31, 16)) >> 16)
+#define I3C_VER_RELEASE_VERSION(x) ((x) & GENMASK(15, 0))
+
+#define I3C_LC_RELEASE 0x6c63
+
#define EXTENDED_CAPABILITY 0xe8
#define SLAVE_CONFIG 0xec
#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
+#define DEV_ADDR_TABLE_DEV_NACK_RETRY(x) (((x) << 29) & GENMASK(30, 29))
+#define DEV_ADDR_TABLE_MR_REJECT BIT(14)
+#define DEV_ADDR_TABLE_SIR_REJECT BIT(13)
+#define DEV_ADDR_TABLE_IBI_WITH_DATA BIT(12)
#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
@@ -196,11 +263,19 @@
#define I3C_BUS_SDR3_SCL_RATE 4000000
#define I3C_BUS_SDR4_SCL_RATE 2000000
#define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
+#define I3C_BUS_I2C_FM_THIGH_MIN_NS 600
#define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
-#define I3C_BUS_THIGH_MAX_NS 41
+#define I3C_BUS_I2C_FMP_THIGH_MIN_NS 260
+#define I3C_BUS_I3C_OD_TLOW_MIN_NS 200
+#define I3C_BUS_I3C_OD_THIGH_MAX_NS 41
+#define I3C_BUS_I3C_PP_TLOW_MIN_NS 25
+#define I3C_BUS_I3C_PP_THIGH_MIN_NS 25
#define XFER_TIMEOUT (msecs_to_jiffies(1000))
+#define DW_I3C_TIMING_MIN 0x0
+#define DW_I3C_TIMING_MAX 0xffffffff
+
struct dw_i3c_master_caps {
u8 cmdfifodepth;
u8 datafifodepth;
@@ -225,6 +300,7 @@ struct dw_i3c_xfer {
};
struct dw_i3c_master {
+ struct device *dev;
struct i3c_master_controller base;
u16 maxdevs;
u16 datstartaddr;
@@ -234,17 +310,64 @@ struct dw_i3c_master {
struct dw_i3c_xfer *cur;
spinlock_t lock;
} xferqueue;
+ union {
+ struct {
+ struct i3c_dev_desc *slots[MAX_DEVS];
+ /*
+ * Prevents simultaneous access to IBI related registers
+ * and slots array.
+ */
+ spinlock_t lock;
+ } master;
+ struct {
+ struct completion comp;
+ } target;
+ } ibi;
struct dw_i3c_master_caps caps;
void __iomem *regs;
struct reset_control *core_rst;
struct clk *core_clk;
- char version[5];
- char type[5];
+ u32 ver_id;
+ u16 ver_type;
u8 addrs[MAX_DEVS];
+
+	/* Timing parameters are expressed in nanoseconds, frequencies in Hz */
+ struct {
+ u32 i3c_od_scl_freq;
+ u32 i3c_od_scl_low;
+ u32 i3c_od_scl_high;
+ u32 i3c_pp_scl_freq;
+ u32 i3c_pp_scl_low;
+ u32 i3c_pp_scl_high;
+ u32 sda_tx_hold;
+ } timings;
+ /* Used for handling private write */
+ struct {
+ void *buf;
+ u16 max_len;
+ } target_rx;
};
struct dw_i3c_i2c_dev_data {
u8 index;
+ s8 ibi;
+ struct i3c_generic_ibi_pool *ibi_pool;
+};
+
+/*
+ * All timing parameters are expressed in nanoseconds.
+ * All frequency parameters are expressed in Hz.
+ */
+struct dw_i3c_scl_timing {
+ u32 high;
+ u32 high_min;
+ u32 high_max;
+ u32 low;
+ u32 low_min;
+ u32 low_max;
+ u32 freq;
+ u32 freq_min;
+ u32 freq_max;
};
static u8 even_parity(u8 p)
@@ -286,6 +409,8 @@ static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
case I3C_CCC_GETSTATUS:
case I3C_CCC_GETMXDS:
case I3C_CCC_GETHDRCAP:
+ case I3C_CCC_SETAASA:
+ case I3C_CCC_SETHID:
return true;
default:
return false;
@@ -306,7 +431,7 @@ static void dw_i3c_master_disable(struct dw_i3c_master *master)
static void dw_i3c_master_enable(struct dw_i3c_master *master)
{
- writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE | DEV_CTRL_IBI_DATA_EN,
master->regs + DEVICE_CTRL);
}
@@ -339,21 +464,43 @@ static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ dev_dbg(master->dev, "TX data = %08x\n", tmp);
}
}
-static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
- u8 *bytes, int nbytes)
+static void dw_i3c_master_read_fifo(struct dw_i3c_master *master, u32 fifo_reg,
+ u8 *bytes, int nbytes)
{
- readsl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
+ readsl(master->regs + fifo_reg, bytes, nbytes / 4);
if (nbytes & 3) {
u32 tmp;
- readsl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ readsl(master->regs + fifo_reg, &tmp, 1);
memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
}
}
+static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ dw_i3c_master_read_fifo(master, RX_TX_DATA_PORT, bytes, nbytes);
+}
+
+static void dw_i3c_master_read_ibi_fifo(struct dw_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ dw_i3c_master_read_fifo(master, IBI_QUEUE_DATA, bytes, nbytes);
+}
+
+static void dw_i3c_master_flush_ibi_fifo(struct dw_i3c_master *master, int nbytes)
+{
+ int nwords = (nbytes + 3) >> 2;
+ int i;
+
+ for (i = 0; i < nwords; i++)
+ readl(master->regs + IBI_QUEUE_DATA);
+}
+
static struct dw_i3c_xfer *
dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
{
@@ -515,11 +662,224 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
dw_i3c_master_start_xfer_locked(master);
}
+static void _timing_calc_when_no_params(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ u32 high, low, period;
+
+ period = DIV_ROUND_CLOSEST(1000000000, timings->freq_max);
+ high = clamp(period / 2, timings->high_min, timings->high_max);
+ low = timings->low_min;
+
+ if (period > high) {
+ u32 delta = period - high;
+
+ if (delta > timings->low_max)
+ low = timings->low_max;
+ else if (delta >= timings->low_min)
+ low = delta;
+ }
+
+ *scl_high = high;
+ *scl_low = low;
+ *scl_period_ns = high + low;
+}
+
+static int _timing_calc_when_scl_high(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ u32 high, low, period;
+
+ high = timings->high;
+ low = timings->low_min;
+ period = DIV_ROUND_CLOSEST(1000000000, timings->freq_max);
+
+ if (period > high) {
+ u32 delta = period - high;
+
+ if (delta > timings->low_max)
+ low = timings->low_max;
+ else if (delta >= timings->low_min)
+ low = delta;
+ }
+
+ *scl_high = high;
+ *scl_low = low;
+ *scl_period_ns = high + low;
+
+ return 0;
+}
+
+static int _timing_calc_when_scl_low(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ u32 high, low, period;
+
+ low = timings->low;
+ high = timings->high_min;
+ period = DIV_ROUND_CLOSEST(1000000000, timings->freq_max);
+
+ if (period > low) {
+ u32 delta = period - low;
+
+ if (delta > timings->high_max)
+ high = timings->high_max;
+ else if (delta >= timings->high_min)
+ high = delta;
+ }
+
+ *scl_high = high;
+ *scl_low = low;
+ *scl_period_ns = high + low;
+
+ return 0;
+}
+
+static int _timing_calc_when_scl_freq(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ u32 high, period;
+
+ period = DIV_ROUND_CLOSEST(1000000000, timings->freq);
+ high = clamp(period / 2, timings->high_min, timings->high_max);
+ if (period <= high)
+ return -EINVAL;
+
+ *scl_high = high;
+ *scl_low = period - high;
+ *scl_period_ns = period;
+
+ return 0;
+}
+
+static int _timing_calc_when_scl_high_low(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ *scl_high = timings->high;
+ *scl_low = timings->low;
+ *scl_period_ns = *scl_high + *scl_low;
+
+ return 0;
+}
+
+static int _timing_calc_when_scl_high_freq(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ *scl_period_ns = DIV_ROUND_CLOSEST(1000000000, timings->freq);
+ *scl_high = timings->high;
+ if (*scl_period_ns <= *scl_high)
+ return -EINVAL;
+
+ *scl_low = *scl_period_ns - *scl_high;
+
+ return 0;
+}
+
+static int _timing_calc_when_scl_low_freq(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ *scl_period_ns = DIV_ROUND_CLOSEST(1000000000, timings->freq);
+ *scl_low = timings->low;
+ if (*scl_period_ns <= *scl_low)
+ return -EINVAL;
+
+ *scl_high = *scl_period_ns - *scl_low;
+
+ return 0;
+}
+
+static int _timing_calc_when_all(struct dw_i3c_scl_timing *timings, u32 *scl_high,
+ u32 *scl_low, u32 *scl_period_ns)
+{
+ *scl_period_ns = DIV_ROUND_CLOSEST(1000000000, timings->freq);
+ *scl_high = timings->high;
+ *scl_low = timings->low;
+
+ return 0;
+}
+
+static int dw_i3c_timing_calc(struct dw_i3c_scl_timing *timings, u32 *scl_high, u32 *scl_low)
+{
+ u32 high = timings->high;
+ u32 low = timings->low;
+ u32 freq = timings->freq;
+ u32 period;
+ int ret = 0;
+
+ if ((high > 0 && (high < timings->high_min || high > timings->high_max)) ||
+ (low > 0 && (low < timings->low_min || low > timings->low_max)) ||
+ (freq > 0 && (freq < timings->freq_min || freq > timings->freq_max)))
+ return -EINVAL;
+
+ if (high == 0 && low == 0 && freq == 0)
+ _timing_calc_when_no_params(timings, &high, &low, &period);
+ else if (high > 0 && low == 0 && freq == 0)
+ ret = _timing_calc_when_scl_high(timings, &high, &low, &period);
+ else if (high == 0 && low > 0 && freq == 0)
+ ret = _timing_calc_when_scl_low(timings, &high, &low, &period);
+ else if (high == 0 && low == 0 && freq > 0)
+ ret = _timing_calc_when_scl_freq(timings, &high, &low, &period);
+ else if (high > 0 && low > 0 && freq == 0)
+ ret = _timing_calc_when_scl_high_low(timings, &high, &low, &period);
+ else if (high > 0 && low == 0 && freq > 0)
+ ret = _timing_calc_when_scl_high_freq(timings, &high, &low, &period);
+ else if (high == 0 && low > 0 && freq > 0)
+ ret = _timing_calc_when_scl_low_freq(timings, &high, &low, &period);
+ else
+ ret = _timing_calc_when_all(timings, &high, &low, &period);
+
+ if (ret)
+ return ret;
+
+ if (high < timings->high_min || high > timings->high_max ||
+ low < timings->low_min || low > timings->low_max)
+ return -EINVAL;
+
+ freq = DIV_ROUND_CLOSEST(1000000000, period);
+ if (freq < timings->freq_min || freq > timings->freq_max)
+ return -EINVAL;
+
+ if ((high + low) != period)
+ return -EINVAL;
+
+ *scl_high = high;
+ *scl_low = low;
+
+ return 0;
+}
+
+static void dw_i3c_timing_calc_cnt(u32 core_rate_hz, u32 high, u32 low, u8 *hcnt, u8 *lcnt)
+{
+ u32 hcnt_tmp, lcnt_tmp;
+ u32 core_period_ns;
+
+ core_period_ns = DIV_ROUND_CLOSEST(1000000000, core_rate_hz);
+ hcnt_tmp = DIV_ROUND_CLOSEST(high, core_period_ns);
+ lcnt_tmp = DIV_ROUND_CLOSEST(low, core_period_ns);
+
+ if (hcnt_tmp < SCL_I3C_TIMING_CNT_MIN)
+ *hcnt = SCL_I3C_TIMING_CNT_MIN;
+ else if (hcnt_tmp > 0xFF)
+ *hcnt = 0xFF;
+ else
+ *hcnt = (u8)hcnt_tmp;
+
+ if (lcnt_tmp < SCL_I3C_TIMING_CNT_MIN)
+ *lcnt = SCL_I3C_TIMING_CNT_MIN;
+ else if (lcnt_tmp > 0xFF)
+ *lcnt = 0xFF;
+ else
+ *lcnt = (u8)lcnt_tmp;
+}
+
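Worked example for the conversion above (hypothetical numbers): with a 200 MHz core clock the core period is DIV_ROUND_CLOSEST(1000000000, 200000000) = 5 ns, so a requested open-drain low time of 200 ns and high time of 40 ns yield lcnt = DIV_ROUND_CLOSEST(200, 5) = 40 and hcnt = DIV_ROUND_CLOSEST(40, 5) = 8, both inside the SCL_I3C_TIMING_CNT_MIN..0xFF clamp.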
static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
{
unsigned long core_rate, core_period;
+ struct dw_i3c_scl_timing timings;
+ u32 high, low;
u32 scl_timing;
u8 hcnt, lcnt;
+ int ret;
core_rate = clk_get_rate(master->core_clk);
if (!core_rate)
@@ -527,23 +887,51 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
core_period = DIV_ROUND_UP(1000000000, core_rate);
- hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
- if (hcnt < SCL_I3C_TIMING_CNT_MIN)
- hcnt = SCL_I3C_TIMING_CNT_MIN;
+ /* Open-drain clock configuration */
+ timings.high = master->timings.i3c_od_scl_high;
+ timings.high_min = I3C_BUS_I3C_PP_THIGH_MIN_NS;
+ timings.high_max = DW_I3C_TIMING_MAX;
+ timings.low = master->timings.i3c_od_scl_low;
+ timings.low_min = I3C_BUS_I3C_OD_TLOW_MIN_NS;
+ timings.low_max = DW_I3C_TIMING_MAX;
+ timings.freq = master->timings.i3c_od_scl_freq;
+ timings.freq_min = DW_I3C_TIMING_MIN;
+ timings.freq_max = I3C_BUS_TYP_I3C_SCL_RATE;
+ ret = dw_i3c_timing_calc(&timings, &high, &low);
+ if (ret)
+ return ret;
- lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_TYP_I3C_SCL_RATE) - hcnt;
- if (lcnt < SCL_I3C_TIMING_CNT_MIN)
- lcnt = SCL_I3C_TIMING_CNT_MIN;
+	dw_i3c_timing_calc_cnt(core_rate, high, low, &hcnt, &lcnt);
+ scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+ /* SDR0 (push-pull) clock configuration */
+ timings.high = master->timings.i3c_pp_scl_high;
+ timings.high_min = I3C_BUS_I3C_PP_THIGH_MIN_NS;
+ timings.high_max = DW_I3C_TIMING_MAX;
+ timings.low = master->timings.i3c_pp_scl_low;
+ timings.low_min = I3C_BUS_I3C_PP_TLOW_MIN_NS;
+ timings.low_max = DW_I3C_TIMING_MAX;
+ timings.freq = master->timings.i3c_pp_scl_freq;
+ timings.freq_min = DW_I3C_TIMING_MIN;
+ timings.freq_max = I3C_BUS_TYP_I3C_SCL_RATE;
+ ret = dw_i3c_timing_calc(&timings, &high, &low);
+ if (ret)
+ return ret;
+
+	dw_i3c_timing_calc_cnt(core_rate, high, low, &hcnt, &lcnt);
scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT))
writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
- lcnt = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period);
- scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
- writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+ /* SDR1, SDR2, SDR3, SDR4 (push-pull) clocks configuration */
+ hcnt = DIV_ROUND_UP(I3C_BUS_I3C_OD_THIGH_MAX_NS, core_period) - 1;
+ if (hcnt < SCL_I3C_TIMING_CNT_MIN)
+ hcnt = SCL_I3C_TIMING_CNT_MIN;
lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
scl_timing = SCL_EXT_LCNT_1(lcnt);
@@ -589,12 +977,31 @@ static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
return 0;
}
-static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
+static int dw_sda_tx_hold_cfg(struct dw_i3c_master *master)
+{
+ unsigned long core_rate, core_period;
+ u8 sda_tx_hold;
+
+	/* Do not modify the register if there is no DT configuration or 0 was provided */
+ if (!master->timings.sda_tx_hold)
+ return 0;
+
+ core_rate = clk_get_rate(master->core_clk);
+ if (!core_rate)
+ return -EINVAL;
+
+ core_period = DIV_ROUND_UP(1000000000, core_rate);
+ sda_tx_hold = clamp((u32)DIV_ROUND_CLOSEST(master->timings.sda_tx_hold, core_period),
+ (u32)SDA_TX_HOLD_MIN, (u32)SDA_TX_HOLD_MAX);
+ writel(SDA_TX_HOLD(sda_tx_hold), master->regs + SDA_HOLD_SWITCH_DLY_TIMING);
+
+ return 0;
+}
+
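Worked example for the hold-time conversion above (hypothetical values): with a 200 MHz core clock (core_period = 5 ns) and sda-tx-hold-ns = 12, DIV_ROUND_CLOSEST(12, 5) = 2, which already lies within the SDA_TX_HOLD_MIN..SDA_TX_HOLD_MAX (1..7) clamp, so SDA_TX_HOLD(2) is written to SDA_HOLD_SWITCH_DLY_TIMING.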
+static int dw_i3c_bus_clk_cfg(struct i3c_master_controller *m)
{
struct dw_i3c_master *master = to_dw_i3c_master(m);
struct i3c_bus *bus = i3c_master_get_bus(m);
- struct i3c_device_info info = { };
- u32 thld_ctrl;
int ret;
switch (bus->mode) {
@@ -613,6 +1020,91 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
return -EINVAL;
}
+ return 0;
+}
+
+static int dw_i3c_target_bus_init(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct i3c_dev_desc *desc = master->base.this;
+ void *rx_buf;
+ u32 reg;
+ int ret;
+
+ ret = dw_i3c_bus_clk_cfg(m);
+ if (ret)
+ return ret;
+
+ reg = readl(master->regs + SLV_MAX_LEN);
+ /*
+ * Set max private write length value based on read-only register.
+ * TODO: Handle updates after receiving SETMWL CCC.
+ */
+ master->target_rx.max_len = SLV_MAX_WR_LEN(reg);
+
+ rx_buf = kzalloc(master->target_rx.max_len, GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
+
+ master->target_rx.buf = rx_buf;
+
+ dw_i3c_master_disable(master);
+
+ reg = readl(master->regs + QUEUE_THLD_CTRL) & ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
+ writel(reg, master->regs + QUEUE_THLD_CTRL);
+
+ reg = readl(master->regs + DATA_BUFFER_THLD_CTRL) & ~DATA_BUFFER_THLD_CTRL_RX_BUF;
+ writel(reg, master->regs + DATA_BUFFER_THLD_CTRL);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ writel(INTR_TARGET_MASK, master->regs + INTR_STATUS_EN);
+ writel(INTR_TARGET_MASK, master->regs + INTR_SIGNAL_EN);
+
+ reg = readl(master->regs + DEVICE_CTRL_EXTENDED) & ~DEVICE_CTRL_EXTENDED_MODE_MASK;
+ reg |= DEVICE_CTRL_EXTENDED_MODE(DEV_OPERATION_MODE_TARGET);
+ writel(reg, master->regs + DEVICE_CTRL_EXTENDED);
+
+ writel(SLV_PID_LO(desc->info.pid), master->regs + SLV_PID_VALUE);
+ writel(SLV_PID_HI(desc->info.pid), master->regs + SLV_MIPI_ID_VALUE);
+
+ reg = readl(master->regs + SLV_CHAR_CTRL) & ~SLV_DCR_MASK & ~SLV_DEVICE_ROLE_MASK;
+ reg |= SLV_DCR(desc->info.dcr) | SLV_DEVICE_ROLE(0);
+ writel(reg, master->regs + SLV_CHAR_CTRL);
+
+ reg = readl(master->regs + BUS_FREE_TIMING) | BUS_AVAIL_TIME(MAX_BUS_AVAIL_CNT);
+ writel(reg, master->regs + BUS_FREE_TIMING);
+
+ dw_i3c_master_enable(master);
+
+ return 0;
+}
+
+static void dw_i3c_target_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ dw_i3c_master_disable(master);
+ kfree(master->target_rx.buf);
+}
+
+static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ u32 interrupt_mask = INTR_MASTER_MASK;
+ struct i3c_device_info info = { };
+ u32 thld_ctrl;
+ int ret;
+
+ spin_lock_init(&master->ibi.master.lock);
+
+ ret = dw_i3c_bus_clk_cfg(m);
+ if (ret)
+ return ret;
+
+ ret = dw_sda_tx_hold_cfg(master);
+ if (ret)
+ return ret;
+
thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
@@ -621,9 +1113,18 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
+ if (master->ver_type >= I3C_LC_RELEASE) {
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~(QUEUE_THLD_CTRL_IBI_STA_MASK | QUEUE_THLD_CTRL_IBI_DAT_MASK);
+ thld_ctrl |= QUEUE_THLD_CTRL_IBI_STA(1) | QUEUE_THLD_CTRL_IBI_DAT(1);
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+ interrupt_mask |= INTR_IBI_THLD_STAT;
+ }
+
writel(INTR_ALL, master->regs + INTR_STATUS);
- writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
- writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
+ writel(interrupt_mask, master->regs + INTR_STATUS_EN);
+ writel(interrupt_mask, master->regs + INTR_SIGNAL_EN);
+
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
@@ -639,7 +1140,6 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
if (ret)
return ret;
- writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
/* For now don't support Hot-Join */
@@ -688,6 +1188,9 @@ static int dw_i3c_ccc_set(struct dw_i3c_master *master,
COMMAND_PORT_TOC |
COMMAND_PORT_ROC;
+ dev_dbg(master->dev, "%s:cmd_hi=0x%08x cmd_lo=0x%08x tx_len=%d id=%x\n",
+ __func__, cmd->cmd_hi, cmd->cmd_lo, cmd->tx_len, ccc->id);
+
dw_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
@@ -729,6 +1232,9 @@ static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
COMMAND_PORT_TOC |
COMMAND_PORT_ROC;
+ dev_dbg(master->dev, "%s:cmd_hi=0x%08x cmd_lo=0x%08x rx_len=%d id=%x\n",
+ __func__, cmd->cmd_hi, cmd->cmd_lo, cmd->rx_len, ccc->id);
+
dw_i3c_master_enqueue_xfer(master, xfer);
if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
dw_i3c_master_dequeue_xfer(master, xfer);
@@ -746,15 +1252,26 @@ static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
{
struct dw_i3c_master *master = to_dw_i3c_master(m);
int ret = 0;
+ u32 i3c_pp_timing, i3c_od_timing;
if (ccc->id == I3C_CCC_ENTDAA)
return -EINVAL;
+ i3c_od_timing = readl(master->regs + SCL_I3C_OD_TIMING);
+ i3c_pp_timing = readl(master->regs + SCL_I3C_PP_TIMING);
+ if ((ccc->id == I3C_CCC_SETAASA) || (ccc->id == I3C_CCC_SETHID) ||
+ (ccc->id == I3C_CCC_DEVCTRL)) {
+ writel(i3c_od_timing, master->regs + SCL_I3C_PP_TIMING);
+ }
+
if (ccc->rnw)
ret = dw_i3c_ccc_get(master, ccc);
else
ret = dw_i3c_ccc_set(master, ccc);
+ if ((ccc->id == I3C_CCC_SETAASA) || (ccc->id == I3C_CCC_SETHID))
+ writel(i3c_pp_timing, master->regs + SCL_I3C_PP_TIMING);
+
return ret;
}
@@ -822,7 +1339,85 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
return 0;
}
+#ifdef CCC_WORKAROUND
+/*
+ * Provide an interface for sending CCCs from userspace, especially for
+ * transfers with PEC and directed CCCs.
+ */
+static int dw_i3c_master_ccc_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_xfer *xfer;
+ int i, ret = 0;
+ struct dw_i3c_cmd *cmd_ccc;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ /* i3c_xfers[0] handles the CCC data */
+ cmd_ccc = &xfer->cmds[0];
+ cmd_ccc->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[0].len - 1) |
+ COMMAND_PORT_TRANSFER_ARG;
+ cmd_ccc->tx_buf = i3c_xfers[0].data.out + 1;
+ cmd_ccc->tx_len = i3c_xfers[0].len - 1;
+ cmd_ccc->cmd_lo = COMMAND_PORT_SPEED(dev->info.max_write_ds);
+ cmd_ccc->cmd_lo |= COMMAND_PORT_TID(0) |
+ COMMAND_PORT_DEV_INDEX(master->maxdevs - 1) |
+ COMMAND_PORT_ROC;
+ if (i3c_nxfers == 1)
+ cmd_ccc->cmd_lo |= COMMAND_PORT_TOC;
+
+ dev_dbg(master->dev,
+ "%s:cmd_ccc_hi=0x%08x cmd_ccc_lo=0x%08x tx_len=%d\n", __func__,
+ cmd_ccc->cmd_hi, cmd_ccc->cmd_lo, cmd_ccc->tx_len);
+
+ for (i = 1; i < i3c_nxfers; i++) {
+		struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ if (i3c_xfers[i].rnw) {
+ cmd->rx_buf = i3c_xfers[i].data.in;
+ cmd->rx_len = i3c_xfers[i].len;
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_SPEED(dev->info.max_read_ds);
+
+ } else {
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->tx_len = i3c_xfers[i].len;
+ cmd->cmd_lo =
+ COMMAND_PORT_SPEED(dev->info.max_write_ds);
+ }
+
+ cmd->cmd_lo |= COMMAND_PORT_TID(i) |
+ COMMAND_PORT_DEV_INDEX(data->index) |
+ COMMAND_PORT_ROC;
+
+ if (i == (i3c_nxfers - 1))
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+
+ dev_dbg(master->dev,
+ "%s:cmd_hi=0x%08x cmd_lo=0x%08x tx_len=%d rx_len=%d\n",
+ __func__, cmd->cmd_hi, cmd->cmd_lo, cmd->tx_len,
+ cmd->rx_len);
+ }
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+#endif
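For context, a caller exercises the workaround by marking the first private write with a leading 0xff byte and placing the raw CCC frame behind it; dw_i3c_master_priv_xfers() below detects the marker and redirects the transfer through dw_i3c_master_ccc_xfers(). The sketch assumes the buffer layout implied by the data.out + 1 handling above, with I3C_CCC_SETAASA used purely as an example CCC id.

	/* Hedged sketch: send a broadcast CCC through the 0xff-marker workaround. */
	static int example_send_ccc_via_marker(struct i3c_device *dev)
	{
		u8 frame[] = { 0xff, I3C_CCC_SETAASA };	/* marker byte + CCC id */
		struct i3c_priv_xfer xfer = {
			.rnw = false,
			.len = sizeof(frame),
			.data.out = frame,
		};

		return i3c_device_do_priv_xfers(dev, &xfer, 1);
	}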
static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *i3c_xfers,
int i3c_nxfers)
@@ -851,6 +1446,17 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
nrxwords > master->caps.datafifodepth)
return -ENOTSUPP;
+#ifdef CCC_WORKAROUND
+ if (i3c_xfers[0].rnw == 0) {
+		/* Write command: check whether the first byte is the special 0xff marker */
+ u8 tmp;
+
+ memcpy(&tmp, i3c_xfers[0].data.out, 1);
+ if (tmp == 0xff)
+ return dw_i3c_master_ccc_xfers(dev, i3c_xfers, i3c_nxfers);
+ }
+#endif
+
xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
if (!xfer)
return -ENOMEM;
@@ -880,6 +1486,11 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
if (i == (i3c_nxfers - 1))
cmd->cmd_lo |= COMMAND_PORT_TOC;
+
+ dev_dbg(master->dev,
+ "%s:cmd_hi=0x%08x cmd_lo=0x%08x tx_len=%d rx_len=%d\n",
+ __func__, cmd->cmd_hi, cmd->cmd_lo, cmd->tx_len,
+ cmd->rx_len);
}
dw_i3c_master_enqueue_xfer(master, xfer);
@@ -887,11 +1498,84 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
dw_i3c_master_dequeue_xfer(master, xfer);
ret = xfer->ret;
+ if (ret)
+ goto out;
+
+ for (i = 0; i < i3c_nxfers; i++)
+ if (i3c_xfers[i].rnw)
+ i3c_xfers[i].len = xfer->cmds[i].rx_len;
+out:
dw_i3c_master_free_xfer(xfer);
return ret;
}
+static int dw_i3c_target_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_xfer *xfer;
+ int i;
+
+ if (!i3c_nxfers)
+ return 0;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ if (!i3c_xfers[i].rnw) {
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->tx_len = i3c_xfers[i].len;
+ cmd->cmd_lo = 0 | (i << 3) | (cmd->tx_len << 16);
+
+ dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
+ }
+ }
+
+ dw_i3c_master_free_xfer(xfer);
+
+ return 0;
+}
+
+static int dw_i3c_target_generate_ibi(struct i3c_dev_desc *dev, const u8 *data, int len)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ u32 reg;
+
+ if (data || len != 0)
+ return -EOPNOTSUPP;
+
+ reg = readl(master->regs + SLV_EVENT_CTRL);
+ if ((reg & SLV_EVENT_CTRL_SIR_EN) == 0)
+ return -EPERM;
+
+ init_completion(&master->ibi.target.comp);
+ writel(1, master->regs + SLV_INTR_REQ);
+
+ if (!wait_for_completion_timeout(&master->ibi.target.comp, XFER_TIMEOUT)) {
+ pr_warn("timeout waiting for completion\n");
+ return -EINVAL;
+ }
+
+ reg = readl(master->regs + SLV_INTR_REQ);
+ if (SLV_INTR_REQ_IBI_STS(reg) != IBI_STS_ACCEPTED) {
+ reg = readl(master->regs + SLV_EVENT_CTRL);
+ if ((reg & SLV_EVENT_CTRL_SIR_EN) == 0)
+ pr_warn("sir is disabled by master\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
u8 old_dyn_addr)
{
@@ -1032,6 +1716,146 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return ret;
}
+static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned int i;
+
+ if (master->ver_type < I3C_LC_RELEASE)
+ return -EOPNOTSUPP;
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irq(&master->ibi.master.lock);
+ for (i = 0; i < master->maxdevs; i++) {
+ if (!master->ibi.master.slots[i]) {
+ data->ibi = i;
+ master->ibi.master.slots[i] = dev;
+ break;
+ }
+ }
+ spin_unlock_irq(&master->ibi.master.lock);
+
+ if (i >= master->maxdevs) {
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int ret, pos, dat_loc;
+ u32 reg;
+
+ pos = dw_i3c_master_get_addr_pos(master, dev->info.dyn_addr);
+ if (pos < 0)
+ return pos;
+
+	/*
+	 * Clear the bit in IBI_SIR_REQ_REJECT so that SIR requests from this
+	 * specific slave device are acknowledged by the master.
+	 */
+ spin_lock_irq(&master->ibi.master.lock);
+ reg = readl(master->regs + IBI_SIR_REQ_REJECT) & ~BIT(dev->info.dyn_addr);
+ writel(reg, master->regs + IBI_SIR_REQ_REJECT);
+
+	/*
+	 * Update the DAT entry accordingly: accept SIRs from this device and,
+	 * if its BCR advertises an IBI payload, expect one or more data bytes.
+	 */
+ dat_loc = DEV_ADDR_TABLE_LOC(master->datstartaddr, pos);
+ reg = readl(master->regs + dat_loc);
+ reg &= ~DEV_ADDR_TABLE_SIR_REJECT;
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ reg |= DEV_ADDR_TABLE_IBI_WITH_DATA;
+ writel(reg, master->regs + dat_loc);
+
+ spin_unlock_irq(&master->ibi.master.lock);
+
+ /* Enable SIR generation on the requested slave device */
+ ret = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+ if (ret) {
+ spin_lock_irq(&master->ibi.master.lock);
+ reg = readl(master->regs + IBI_SIR_REQ_REJECT);
+ reg |= BIT(dev->info.dyn_addr);
+ writel(reg, master->regs + IBI_SIR_REQ_REJECT);
+
+ reg = readl(master->regs + dat_loc);
+ reg |= DEV_ADDR_TABLE_SIR_REJECT;
+ reg &= ~DEV_ADDR_TABLE_IBI_WITH_DATA;
+ writel(reg, master->regs + dat_loc);
+ spin_unlock_irq(&master->ibi.master.lock);
+ }
+
+ return ret;
+}
+
+static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ u32 reg;
+ int ret, pos, dat_loc;
+
+ /* Disable SIR generation on the requested slave device */
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+ if (ret)
+ return ret;
+
+ pos = dw_i3c_master_get_addr_pos(master, dev->info.dyn_addr);
+ if (pos < 0) {
+ dev_warn(master->dev, "Failed to get DAT addr pos for dev %02x\n",
+ dev->info.dyn_addr);
+ return pos;
+ }
+
+ spin_lock_irq(&master->ibi.master.lock);
+ reg = readl(master->regs + IBI_SIR_REQ_REJECT);
+ reg |= BIT(dev->info.dyn_addr);
+ writel(reg, master->regs + IBI_SIR_REQ_REJECT);
+
+ dat_loc = DEV_ADDR_TABLE_LOC(master->datstartaddr, pos);
+ reg = readl(master->regs + dat_loc);
+ reg |= DEV_ADDR_TABLE_SIR_REJECT;
+ reg &= ~DEV_ADDR_TABLE_IBI_WITH_DATA;
+ writel(reg, master->regs + dat_loc);
+ spin_unlock_irq(&master->ibi.master.lock);
+
+ return 0;
+}
+
+static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ spin_lock_irq(&master->ibi.master.lock);
+ master->ibi.master.slots[data->ibi] = NULL;
+ data->ibi = -1;
+ spin_unlock_irq(&master->ibi.master.lock);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void dw_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
{
struct i3c_master_controller *m = i2c_dev_get_master(dev);
@@ -1076,27 +1900,204 @@ static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
kfree(data);
}
+static struct i3c_dev_desc *dw_get_i3c_dev_by_addr(struct dw_i3c_master *master,
+ u8 addr)
+{
+ int i;
+
+ for (i = 0; i < master->maxdevs; i++) {
+ if (master->ibi.master.slots[i] &&
+ master->ibi.master.slots[i]->info.dyn_addr == addr)
+ return master->ibi.master.slots[i];
+ }
+ return NULL;
+}
+
+static void dw_i3c_master_sir_handler(struct dw_i3c_master *master,
+ u32 ibi_status)
+{
+ u8 length = IBI_QUEUE_STATUS_DATA_LEN(ibi_status);
+ u8 addr = IBI_QUEUE_IBI_ADDR(ibi_status);
+ struct dw_i3c_i2c_dev_data *data;
+ struct i3c_ibi_slot *slot;
+ struct i3c_dev_desc *dev;
+ u8 *buf;
+
+ dev = dw_get_i3c_dev_by_addr(master, addr);
+ if (!dev) {
+ dev_warn(master->dev, "no matching dev\n");
+ goto err;
+ }
+
+ data = i3c_dev_get_master_data(dev);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot) {
+ dev_warn(master->dev, "no free ibi slot\n");
+ goto err;
+ }
+ buf = slot->data;
+ /* prepend ibi status */
+ memcpy(buf, &ibi_status, sizeof(ibi_status));
+ buf += sizeof(ibi_status);
+
+ dw_i3c_master_read_ibi_fifo(master, buf, length);
+
+ slot->len = length + sizeof(ibi_status);
+
+ i3c_master_queue_ibi(dev, slot);
+ return;
+
+err:
+ dw_i3c_master_flush_ibi_fifo(master, length);
+}
+
+static void dw_i3c_master_demux_ibis(struct dw_i3c_master *master)
+{
+ u32 nibi, status, intr_signal_en;
+ int i;
+
+ nibi = QUEUE_STATUS_IBI_STATUS_CNT(readl(master->regs + QUEUE_STATUS_LEVEL));
+
+ spin_lock(&master->ibi.master.lock);
+ intr_signal_en = readl(master->regs + INTR_SIGNAL_EN);
+ intr_signal_en &= ~INTR_IBI_THLD_STAT;
+ writel(intr_signal_en, master->regs + INTR_SIGNAL_EN);
+
+ for (i = 0; i < nibi; i++) {
+ status = readl(master->regs + IBI_QUEUE_STATUS);
+
+ if (status & IBI_QUEUE_STATUS_RSP_NACK)
+ dev_warn_once(master->dev, "ibi from unrecognized slave %02lx\n",
+ IBI_QUEUE_IBI_ADDR(status));
+
+ if (status & IBI_QUEUE_STATUS_PEC_ERR)
+ dev_warn(master->dev, "ibi crc/pec error\n");
+
+ if (IBI_TYPE_SIR(status))
+ dw_i3c_master_sir_handler(master, status);
+ }
+
+ intr_signal_en = readl(master->regs + INTR_SIGNAL_EN);
+ intr_signal_en |= INTR_IBI_THLD_STAT;
+ writel(intr_signal_en, master->regs + INTR_SIGNAL_EN);
+ spin_unlock(&master->ibi.master.lock);
+}
+
+static void dw_i3c_target_handle_response_ready(struct dw_i3c_master *master)
+{
+ struct i3c_dev_desc *desc = master->base.this;
+ u32 reg = readl(master->regs + QUEUE_STATUS_LEVEL);
+ u32 nresp = QUEUE_STATUS_LEVEL_RESP(reg);
+ int i;
+
+ for (i = 0; i < nresp; i++) {
+ u32 resp = readl(master->regs + RESPONSE_QUEUE_PORT);
+ u32 nbytes = RESPONSE_PORT_DATA_LEN(resp);
+
+ if (nbytes > master->target_rx.max_len) {
+ dev_warn(master->dev, "private write data length is larger than max\n");
+ return;
+ }
+
+ dw_i3c_master_read_rx_fifo(master, master->target_rx.buf, nbytes);
+
+ if (desc->target_info.read_handler)
+ desc->target_info.read_handler(desc->dev, master->target_rx.buf, nbytes);
+ }
+}
+
static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
{
struct dw_i3c_master *master = dev_id;
u32 status;
status = readl(master->regs + INTR_STATUS);
-
if (!(status & readl(master->regs + INTR_STATUS_EN))) {
writel(INTR_ALL, master->regs + INTR_STATUS);
return IRQ_NONE;
}
+ if (master->base.target) {
+ if (status & INTR_IBI_UPDATED_STAT) {
+ writel(INTR_IBI_UPDATED_STAT, master->regs + INTR_STATUS);
+ complete(&master->ibi.target.comp);
+ }
+
+ if (status & INTR_READ_REQ_RECV_STAT) {
+ /*
+ * TODO: Pass this information to the driver to take
+ * appropriate action.
+ */
+ dev_dbg(master->dev,
+ "private read received from controller when cmd queue is empty\n");
+ writel(INTR_READ_REQ_RECV_STAT, master->regs + INTR_STATUS);
+ }
+
+ if (status & INTR_RESP_READY_STAT)
+ dw_i3c_target_handle_response_ready(master);
+ }
+
spin_lock(&master->xferqueue.lock);
dw_i3c_master_end_xfer_locked(master, status);
if (status & INTR_TRANSFER_ERR_STAT)
writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
spin_unlock(&master->xferqueue.lock);
+ if (status & INTR_IBI_THLD_STAT)
+ dw_i3c_master_demux_ibis(master);
+
return IRQ_HANDLED;
}
+static void dw_i3c_master_of_timings(struct dw_i3c_master *master,
+ struct device_node *node)
+{
+ u32 val;
+
+ if (!of_property_read_u32(node, "i2c-scl-hz", &val))
+ master->timings.i3c_od_scl_freq = val;
+
+ if (!of_property_read_u32(node, "i3c-od-scl-low-ns", &val)) {
+ if (val < I3C_BUS_I3C_OD_TLOW_MIN_NS)
+ dev_warn(master->dev,
+ "invalid i3c-od-scl-low-ns: %u, ignoring provided value\n", val);
+ else
+ master->timings.i3c_od_scl_low = val;
+ }
+
+ if (!of_property_read_u32(node, "i3c-od-scl-high-ns", &val))
+ master->timings.i3c_od_scl_high = val;
+
+ if (!of_property_read_u32(node, "i3c-scl-hz", &val))
+ master->timings.i3c_pp_scl_freq = val;
+
+ if (!of_property_read_u32(node, "i3c-pp-scl-low-ns", &val)) {
+ if (val < I3C_BUS_I3C_PP_TLOW_MIN_NS)
+ dev_warn(master->dev,
+ "invalid i3c-pp-scl-low-ns: %u, ignoring provided value\n", val);
+ else
+ master->timings.i3c_pp_scl_low = val;
+ }
+
+ if (!of_property_read_u32(node, "i3c-pp-scl-high-ns", &val)) {
+ if (val < I3C_BUS_I3C_PP_THIGH_MIN_NS)
+ dev_warn(master->dev,
+ "invalid i3c-pp-scl-high-ns: %u, ignoring provided value\n", val);
+ else
+ master->timings.i3c_pp_scl_high = val;
+ }
+
+ if (!of_property_read_u32(node, "sda-tx-hold-ns", &val))
+ master->timings.sda_tx_hold = val;
+}
+
+static const struct i3c_target_ops dw_mipi_i3c_target_ops = {
+ .bus_init = dw_i3c_target_bus_init,
+ .bus_cleanup = dw_i3c_target_bus_cleanup,
+ .priv_xfers = dw_i3c_target_priv_xfers,
+ .generate_ibi = dw_i3c_target_generate_ibi,
+};
+
static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
.bus_init = dw_i3c_master_bus_init,
.bus_cleanup = dw_i3c_master_bus_cleanup,
@@ -1110,6 +2111,11 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
.attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
.detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
.i2c_xfers = dw_i3c_master_i2c_xfers,
+ .request_ibi = dw_i3c_master_request_ibi,
+ .enable_ibi = dw_i3c_master_enable_ibi,
+ .free_ibi = dw_i3c_master_free_ibi,
+ .disable_ibi = dw_i3c_master_disable_ibi,
+ .recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot,
};
static int dw_i3c_probe(struct platform_device *pdev)
@@ -1121,6 +2127,8 @@ static int dw_i3c_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->dev = &pdev->dev;
+
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
@@ -1143,13 +2151,7 @@ static int dw_i3c_probe(struct platform_device *pdev)
spin_lock_init(&master->xferqueue.lock);
INIT_LIST_HEAD(&master->xferqueue.list);
- writel(INTR_ALL, master->regs + INTR_STATUS);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq,
- dw_i3c_master_irq_handler, 0,
- dev_name(&pdev->dev), master);
- if (ret)
- goto err_assert_rst;
+ spin_lock_init(&master->ibi.master.lock);
platform_set_drvdata(pdev, master);
@@ -1164,12 +2166,37 @@ static int dw_i3c_probe(struct platform_device *pdev)
master->datstartaddr = ret;
master->maxdevs = ret >> 16;
master->free_pos = GENMASK(master->maxdevs - 1, 0);
+#ifdef CCC_WORKAROUND
+ if (master->maxdevs > 0) {
+ master->free_pos &= ~BIT(master->maxdevs - 1);
+ ret = (even_parity(I3C_BROADCAST_ADDR) << 7) | I3C_BROADCAST_ADDR;
+ master->addrs[master->maxdevs - 1] = ret;
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
+ master->regs + DEV_ADDR_TABLE_LOC(master->datstartaddr,
+ master->maxdevs - 1));
+ }
+#endif
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq,
+ dw_i3c_master_irq_handler, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_assert_rst;
- ret = i3c_master_register(&master->base, &pdev->dev,
- &dw_mipi_i3c_ops, false);
+ ret = readl(master->regs + I3C_VER_TYPE);
+ master->ver_type = I3C_VER_RELEASE_TYPE(ret);
+
+ dw_i3c_master_of_timings(master, pdev->dev.of_node);
+
+ ret = i3c_register(&master->base, &pdev->dev, &dw_mipi_i3c_ops, &dw_mipi_i3c_target_ops,
+ false);
if (ret)
goto err_assert_rst;
+ dev_info(&pdev->dev, "i3c bus %d registered, irq %d\n",
+ master->base.bus.id, irq);
+
return 0;
err_assert_rst:
@@ -1186,7 +2213,7 @@ static int dw_i3c_remove(struct platform_device *pdev)
struct dw_i3c_master *master = platform_get_drvdata(pdev);
int ret;
- ret = i3c_master_unregister(&master->base);
+ ret = i3c_unregister(&master->base);
if (ret)
return ret;
diff --git a/drivers/i3c/mctp/Kconfig b/drivers/i3c/mctp/Kconfig
new file mode 100644
index 000000000000..e0b9743b4c6d
--- /dev/null
+++ b/drivers/i3c/mctp/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config I3C_MCTP
+ tristate "I3C MCTP driver"
+ depends on I3C
+	help
+	  Say yes here to enable the I3C MCTP driver. The driver exposes a
+	  per-device character interface used to exchange MCTP packets over
+	  I3C from user space.
diff --git a/drivers/i3c/mctp/Makefile b/drivers/i3c/mctp/Makefile
new file mode 100644
index 000000000000..b6e19ada2916
--- /dev/null
+++ b/drivers/i3c/mctp/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_I3C_MCTP) += i3c-mctp.o
diff --git a/drivers/i3c/mctp/i3c-mctp.c b/drivers/i3c/mctp/i3c-mctp.c
new file mode 100644
index 000000000000..b0e381456404
--- /dev/null
+++ b/drivers/i3c/mctp/i3c-mctp.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Intel Corporation.*/
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/preempt.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/i3c/device.h>
+
+#define I3C_MCTP_MINORS 32
+#define CCC_DEVICE_STATUS_PENDING_INTR(x) (((x) & GENMASK(3, 0)) >> 0)
+#define POLLING_TIMEOUT_MS 50
+#define MCTP_INTERRUPT_NUMBER 1
+
+struct i3c_mctp {
+ struct i3c_device *i3c;
+ struct cdev cdev;
+ struct device *dev;
+ struct delayed_work polling_work;
+ int id;
+ wait_queue_head_t *wait_queue;
+ bool transfer_rdy;
+	/*
+	 * Prevent simultaneous access to the transfer_rdy flag,
+	 * which signals that a read transfer is ready.
+	 */
+ spinlock_t transfer_rdy_lock;
+	/*
+	 * Restrict access to the /dev node to one
+	 * user at a time.
+	 */
+ spinlock_t device_file_lock;
+ int device_open;
+};
+
+static struct class *i3c_mctp_class;
+static dev_t i3c_mctp_devt;
+static DEFINE_IDA(i3c_mctp_ida);
+
+static void i3c_mctp_polling_work(struct work_struct *work)
+{
+ struct i3c_mctp *priv = container_of(to_delayed_work(work), struct i3c_mctp, polling_work);
+ struct i3c_device *i3cdev = priv->i3c;
+ struct i3c_device_info info;
+ int ret;
+
+ i3c_device_get_info(i3cdev, &info);
+ ret = i3c_device_getstatus_ccc(i3cdev, &info);
+ if (ret)
+ return;
+
+ if (CCC_DEVICE_STATUS_PENDING_INTR(info.status) != MCTP_INTERRUPT_NUMBER)
+ return;
+
+ spin_lock(&priv->transfer_rdy_lock);
+ priv->transfer_rdy = true;
+ spin_unlock(&priv->transfer_rdy_lock);
+
+ wake_up_all(priv->wait_queue);
+
+ schedule_delayed_work(&priv->polling_work, msecs_to_jiffies(POLLING_TIMEOUT_MS));
+}
+
+static ssize_t i3c_mctp_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct i3c_mctp *priv = file->private_data;
+ struct i3c_device *i3c = priv->i3c;
+ struct i3c_priv_xfer xfers = {
+ .rnw = false,
+ .len = count,
+ };
+ u8 *data;
+ int ret;
+
+ data = memdup_user(buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ xfers.data.out = data;
+
+ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
+ kfree(data);
+ return ret ?: count;
+}
+
+static ssize_t i3c_mctp_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct i3c_mctp *priv = file->private_data;
+ struct i3c_device *i3c = priv->i3c;
+ struct i3c_priv_xfer xfers = {
+ .rnw = true,
+ .len = count,
+ };
+ u8 *data;
+ int ret;
+
+ data = kzalloc(count, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ xfers.data.in = data;
+
+ if (!priv->transfer_rdy) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(buf, data, xfers.len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ spin_lock(&priv->transfer_rdy_lock);
+ priv->transfer_rdy = false;
+ spin_unlock(&priv->transfer_rdy_lock);
+
+out:
+ kfree(data);
+ return ret ?: xfers.len;
+}
+
+static int i3c_mctp_open(struct inode *inode, struct file *file)
+{
+ struct i3c_mctp *priv = container_of(inode->i_cdev, struct i3c_mctp, cdev);
+
+ spin_lock(&priv->device_file_lock);
+ if (priv->device_open) {
+ spin_unlock(&priv->device_file_lock);
+ return -EBUSY;
+ }
+ priv->device_open++;
+ spin_unlock(&priv->device_file_lock);
+
+ file->private_data = priv;
+ init_waitqueue_head(priv->wait_queue);
+
+ return 0;
+}
+
+static int i3c_mctp_release(struct inode *inode, struct file *file)
+{
+ struct i3c_mctp *priv = file->private_data;
+
+ spin_lock(&priv->device_file_lock);
+ priv->device_open--;
+ spin_unlock(&priv->device_file_lock);
+
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static __poll_t i3c_mctp_poll(struct file *file, struct poll_table_struct *pt)
+{
+ struct i3c_mctp *priv = file->private_data;
+ __poll_t ret = 0;
+
+ poll_wait(file, priv->wait_queue, pt);
+
+ return priv->transfer_rdy ? EPOLLIN : ret;
+}
+
+static const struct file_operations i3c_mctp_fops = {
+ .owner = THIS_MODULE,
+ .read = i3c_mctp_read,
+ .write = i3c_mctp_write,
+ .poll = i3c_mctp_poll,
+ .open = i3c_mctp_open,
+ .release = i3c_mctp_release,
+};
+
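The file operations above give user space a simple open/poll/read/write interface per MCTP-capable I3C device. A minimal reader might look like the sketch below; the device node name is illustrative, the real one is created further down as "i3c-mctp-" followed by the I3C device name.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];
		struct pollfd pfd;
		ssize_t n;

		pfd.fd = open("/dev/i3c-mctp-0-3c000000000", O_RDWR);	/* example node name */
		if (pfd.fd < 0)
			return 1;
		pfd.events = POLLIN;

		/* Wait until the driver flags a pending transfer (IBI or polling). */
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
			n = read(pfd.fd, buf, sizeof(buf));
			if (n > 0)
				printf("read %zd bytes\n", n);
		}

		close(pfd.fd);
		return 0;
	}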
+static struct i3c_mctp *i3c_mctp_alloc(struct i3c_device *i3c)
+{
+ struct i3c_mctp *priv;
+ int id;
+
+ priv = devm_kzalloc(i3cdev_to_dev(i3c), sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&i3c_mctp_ida, GFP_KERNEL);
+ if (id < 0) {
+ pr_err("i3c_mctp: no minor number available!\n");
+ return ERR_PTR(id);
+ }
+
+ priv->id = id;
+ priv->i3c = i3c;
+ priv->wait_queue = devm_kzalloc(i3cdev_to_dev(i3c), sizeof(wait_queue_head_t), GFP_KERNEL);
+ if (!priv->wait_queue) {
+ ida_free(&i3c_mctp_ida, id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&priv->device_file_lock);
+ spin_lock_init(&priv->transfer_rdy_lock);
+
+ return priv;
+}
+
+static void i3c_mctp_ibi_handler(struct i3c_device *dev, const struct i3c_ibi_payload *payload)
+{
+ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(dev));
+
+ spin_lock(&priv->device_file_lock);
+ if (!priv->device_open) {
+ spin_unlock(&priv->device_file_lock);
+ return;
+ }
+ spin_unlock(&priv->device_file_lock);
+
+ wake_up_all(priv->wait_queue);
+ spin_lock(&priv->transfer_rdy_lock);
+ priv->transfer_rdy = true;
+ spin_unlock(&priv->transfer_rdy_lock);
+}
+
+static int i3c_mctp_init(struct i3c_driver *drv)
+{
+ int ret;
+
+ /* Dynamically request unused major number */
+ ret = alloc_chrdev_region(&i3c_mctp_devt, 0, I3C_MCTP_MINORS, "i3c-mctp");
+ if (ret)
+ goto out;
+
+	/* Create a class to populate sysfs entries */
+ i3c_mctp_class = class_create(THIS_MODULE, "i3c-mctp");
+ if (IS_ERR(i3c_mctp_class)) {
+ ret = PTR_ERR(i3c_mctp_class);
+ goto out_unreg_chrdev;
+ }
+
+ i3c_driver_register(drv);
+
+ return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(i3c_mctp_devt, I3C_MCTP_MINORS);
+out:
+ pr_err("i3c_mctp: driver initialisation failed\n");
+ return ret;
+}
+
+static void i3c_mctp_free(struct i3c_driver *drv)
+{
+ i3c_driver_unregister(drv);
+ class_destroy(i3c_mctp_class);
+ unregister_chrdev_region(i3c_mctp_devt, I3C_MCTP_MINORS);
+}
+
+static int i3c_mctp_enable_ibi(struct i3c_device *i3cdev)
+{
+ struct i3c_ibi_setup ibireq = {
+ .handler = i3c_mctp_ibi_handler,
+ .max_payload_len = 2,
+ .num_slots = 10,
+ };
+ int ret;
+
+ ret = i3c_device_request_ibi(i3cdev, &ibireq);
+ if (ret)
+ return ret;
+ ret = i3c_device_enable_ibi(i3cdev);
+ if (ret)
+ i3c_device_free_ibi(i3cdev);
+
+ return ret;
+}
+
+static int i3c_mctp_probe(struct i3c_device *i3cdev)
+{
+ struct i3c_mctp *priv;
+ struct device *dev = i3cdev_to_dev(i3cdev);
+ int ret;
+
+ priv = i3c_mctp_alloc(i3cdev);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ cdev_init(&priv->cdev, &i3c_mctp_fops);
+
+ priv->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&priv->cdev, MKDEV(MAJOR(i3c_mctp_devt), priv->id), 1);
+ if (ret)
+ goto error_cdev;
+
+ /* register this i3c device with the driver core */
+ priv->dev = device_create(i3c_mctp_class, dev,
+ MKDEV(MAJOR(i3c_mctp_devt), priv->id),
+ NULL, "i3c-mctp-%s", dev_name(dev));
+ if (IS_ERR(priv->dev)) {
+ ret = PTR_ERR(priv->dev);
+ goto error;
+ }
+
+ dev_set_drvdata(i3cdev_to_dev(i3cdev), priv);
+
+ if (i3c_mctp_enable_ibi(i3cdev)) {
+ INIT_DELAYED_WORK(&priv->polling_work, i3c_mctp_polling_work);
+ schedule_delayed_work(&priv->polling_work, msecs_to_jiffies(POLLING_TIMEOUT_MS));
+ }
+
+ return 0;
+
+error:
+ cdev_del(&priv->cdev);
+error_cdev:
+ put_device(dev);
+ return ret;
+}
+
+static void i3c_mctp_remove(struct i3c_device *i3cdev)
+{
+ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3cdev));
+
+ i3c_device_disable_ibi(i3cdev);
+ i3c_device_free_ibi(i3cdev);
+
+ device_destroy(i3c_mctp_class, MKDEV(MAJOR(i3c_mctp_devt), priv->id));
+ cdev_del(&priv->cdev);
+ ida_free(&i3c_mctp_ida, priv->id);
+}
+
+static const struct i3c_device_id i3c_mctp_ids[] = {
+ I3C_CLASS(0xCC, 0x0),
+ { },
+};
+
+static struct i3c_driver i3c_mctp_drv = {
+ .driver.name = "i3c-mctp",
+ .id_table = i3c_mctp_ids,
+ .probe = i3c_mctp_probe,
+ .remove = i3c_mctp_remove,
+};
+
+module_driver(i3c_mctp_drv, i3c_mctp_init, i3c_mctp_free);
+MODULE_AUTHOR("Oleksandr Shulzhenko <oleksandr.shulzhenko.viktorovych@intel.com>");
+MODULE_DESCRIPTION("I3C MCTP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/jtag/Kconfig b/drivers/jtag/Kconfig
new file mode 100644
index 000000000000..0cc163f9ad44
--- /dev/null
+++ b/drivers/jtag/Kconfig
@@ -0,0 +1,31 @@
+menuconfig JTAG
+ tristate "JTAG support"
+ help
+ This provides basic core functionality support for JTAG class devices.
+ Hardware that is equipped with a JTAG microcontroller can be
+ supported by using this driver's interfaces.
+	  This driver exposes a set of IOCTLs to user space for
+	  the following commands:
+	  SDR: Performs an IEEE 1149.1 Data Register scan.
+ SIR: Performs an IEEE 1149.1 Instruction Register scan.
+ RUNTEST: Forces the IEEE 1149.1 bus to a run state for a specified
+ number of clocks or a specified time period.
+
+ If you want this support, you should say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called jtag.
+
+menuconfig JTAG_ASPEED
+ tristate "Aspeed SoC JTAG controller support"
+ depends on JTAG && HAS_IOMEM
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+	  This provides support for the Aspeed JTAG controller found on the
+	  Aspeed SoC 24xx, 25xx and 26xx families. The driver allows
+	  programming of hardware devices connected to the SoC through the
+	  JTAG interface.
+
+ If you want this support, you should say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called jtag-aspeed.
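
A note on the user-space interface listed in the JTAG help text above: the IOCTLs
(and the struct jtag_xfer layout validated in drivers/jtag/jtag.c below) can be
exercised roughly as in the following sketch. This is illustrative only; it assumes
the uapi header from this series is installed as <linux/jtag.h>, that a controller
has registered as /dev/jtag0, and it omits error handling.

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/jtag.h>

    int main(void)
    {
            uint8_t tdio[4] = { 0x12, 0x34, 0x56, 0x78 };   /* TDI out / TDO in */
            struct jtag_xfer xfer;
            uint32_t freq = 1000000;                        /* request a 1 MHz TCK */
            int fd;

            fd = open("/dev/jtag0", O_RDWR);
            ioctl(fd, JTAG_SIOCFREQ, &freq);

            memset(&xfer, 0, sizeof(xfer));
            xfer.type = JTAG_SDR_XFER;                      /* Data Register scan */
            xfer.direction = JTAG_READ_WRITE_XFER;
            xfer.from = JTAG_STATE_CURRENT;
            xfer.endstate = JTAG_STATE_IDLE;
            xfer.length = 32;                               /* length is in bits */
            xfer.tdio = (uintptr_t)tdio;
            ioctl(fd, JTAG_IOCXFER, &xfer);                 /* tdio now holds TDO bits */

            close(fd);
            return 0;
    }

Because JTAG_IOCXFER copies the buffer back through the tdio pointer, the same
buffer holds the captured TDO data after the call returns.
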
diff --git a/drivers/jtag/Makefile b/drivers/jtag/Makefile
new file mode 100644
index 000000000000..04a855e2df28
--- /dev/null
+++ b/drivers/jtag/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_JTAG) += jtag.o
+obj-$(CONFIG_JTAG_ASPEED) += jtag-aspeed.o
diff --git a/drivers/jtag/jtag-aspeed.c b/drivers/jtag/jtag-aspeed.c
new file mode 100644
index 000000000000..6701787dd5a5
--- /dev/null
+++ b/drivers/jtag/jtag-aspeed.c
@@ -0,0 +1,1574 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+// Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com>
+// Copyright (c) 2019 Intel Corporation
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/jtag.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <uapi/linux/jtag.h>
+
+#define ASPEED_JTAG_DATA 0x00
+#define ASPEED_JTAG_INST 0x04
+#define ASPEED_JTAG_CTRL 0x08
+#define ASPEED_JTAG_ISR 0x0C
+#define ASPEED_JTAG_SW 0x10
+#define ASPEED_JTAG_TCK 0x14
+#define ASPEED_JTAG_EC 0x18
+
+#define ASPEED_JTAG_DATA_MSB 0x01
+#define ASPEED_JTAG_DATA_CHUNK_SIZE 0x20
+#define ASPEED_JTAG_HW2_DATA_CHUNK_SIZE 512
+
+/* ASPEED_JTAG_CTRL: Engine Control 24xx and 25xx series */
+#define ASPEED_JTAG_CTL_ENG_EN BIT(31)
+#define ASPEED_JTAG_CTL_ENG_OUT_EN BIT(30)
+#define ASPEED_JTAG_CTL_FORCE_TMS BIT(29)
+#define ASPEED_JTAG_CTL_IR_UPDATE BIT(26)
+#define ASPEED_JTAG_CTL_INST_LEN(x) ((x) << 20)
+#define ASPEED_JTAG_CTL_LASPEED_INST BIT(17)
+#define ASPEED_JTAG_CTL_INST_EN BIT(16)
+#define ASPEED_JTAG_CTL_DR_UPDATE BIT(10)
+#define ASPEED_JTAG_CTL_DATA_LEN(x) ((x) << 4)
+#define ASPEED_JTAG_CTL_LASPEED_DATA BIT(1)
+#define ASPEED_JTAG_CTL_DATA_EN BIT(0)
+
+/* ASPEED_JTAG_CTRL: Engine Control 26xx series */
+#define ASPEED_JTAG_CTL_26XX_RESET_FIFO BIT(21)
+#define ASPEED_JTAG_CTL_26XX_FIFO_MODE_CTRL BIT(20)
+#define ASPEED_JTAG_CTL_26XX_TRANS_LEN(x) ((x) << 8)
+#define ASPEED_JTAG_CTL_26XX_TRANS_MASK GENMASK(17, 8)
+#define ASPEED_JTAG_CTL_26XX_MSB_FIRST BIT(6)
+#define ASPEED_JTAG_CTL_26XX_TERM_TRANS BIT(5)
+#define ASPEED_JTAG_CTL_26XX_LASPEED_TRANS BIT(4)
+#define ASPEED_JTAG_CTL_26XX_INST_EN BIT(1)
+
+/* ASPEED_JTAG_ISR : Interrupt status and enable */
+#define ASPEED_JTAG_ISR_INST_PAUSE BIT(19)
+#define ASPEED_JTAG_ISR_INST_COMPLETE BIT(18)
+#define ASPEED_JTAG_ISR_DATA_PAUSE BIT(17)
+#define ASPEED_JTAG_ISR_DATA_COMPLETE BIT(16)
+#define ASPEED_JTAG_ISR_INST_PAUSE_EN BIT(3)
+#define ASPEED_JTAG_ISR_INST_COMPLETE_EN BIT(2)
+#define ASPEED_JTAG_ISR_DATA_PAUSE_EN BIT(1)
+#define ASPEED_JTAG_ISR_DATA_COMPLETE_EN BIT(0)
+#define ASPEED_JTAG_ISR_INT_EN_MASK GENMASK(3, 0)
+#define ASPEED_JTAG_ISR_INT_MASK GENMASK(19, 16)
+
+/* ASPEED_JTAG_SW : Software Mode and Status */
+#define ASPEED_JTAG_SW_MODE_EN BIT(19)
+#define ASPEED_JTAG_SW_MODE_TCK BIT(18)
+#define ASPEED_JTAG_SW_MODE_TMS BIT(17)
+#define ASPEED_JTAG_SW_MODE_TDIO BIT(16)
+
+/* ASPEED_JTAG_TCK : TCK Control */
+#define ASPEED_JTAG_TCK_DIVISOR_MASK GENMASK(10, 0)
+#define ASPEED_JTAG_TCK_GET_DIV(x) ((x) & ASPEED_JTAG_TCK_DIVISOR_MASK)
+
+/* ASPEED_JTAG_EC : Controller set for go to IDLE */
+#define ASPEED_JTAG_EC_GO_IDLE BIT(0)
+
+#define ASPEED_JTAG_IOUT_LEN(len) \
+ (ASPEED_JTAG_CTL_ENG_EN | \
+ ASPEED_JTAG_CTL_ENG_OUT_EN | \
+ ASPEED_JTAG_CTL_INST_LEN(len))
+
+#define ASPEED_JTAG_DOUT_LEN(len) \
+ (ASPEED_JTAG_CTL_ENG_EN | \
+ ASPEED_JTAG_CTL_ENG_OUT_EN | \
+ ASPEED_JTAG_CTL_DATA_LEN(len))
+
+#define ASPEED_JTAG_TRANS_LEN(len) \
+ (ASPEED_JTAG_CTL_ENG_EN | \
+ ASPEED_JTAG_CTL_ENG_OUT_EN | \
+ ASPEED_JTAG_CTL_26XX_TRANS_LEN(len))
+
+#define ASPEED_JTAG_SW_TDIO (ASPEED_JTAG_SW_MODE_EN | ASPEED_JTAG_SW_MODE_TDIO)
+
+#define ASPEED_JTAG_GET_TDI(direction, byte) \
+	(((direction) & JTAG_WRITE_XFER) ? (byte) : UINT_MAX)
+
+#define ASPEED_JTAG_TCK_WAIT 10
+#define ASPEED_JTAG_RESET_CNTR 10
+#define WAIT_ITERATIONS 300
+
+/* Define this macro to use HW mode 2; comment it out to fall back to HW mode 1 */
+#define ASPEED_JTAG_HW_MODE_2_ENABLE 1
+
+/* ASPEED JTAG HW MODE 2 (Only supported in AST26xx series) */
+#define ASPEED_JTAG_SHDATA 0x20
+#define ASPEED_JTAG_SHINST 0x24
+#define ASPEED_JTAG_PADCTRL0 0x28
+#define ASPEED_JTAG_PADCTRL1 0x2C
+#define ASPEED_JTAG_SHCTRL 0x30
+#define ASPEED_JTAG_GBLCTRL 0x34
+#define ASPEED_JTAG_INTCTRL 0x38
+#define ASPEED_JTAG_STAT 0x3C
+
+/* ASPEED_JTAG_PADCTRLx : Padding control 0 and 1 */
+#define ASPEED_JTAG_PADCTRL_PAD_DATA BIT(24)
+#define ASPEED_JTAG_PADCTRL_POSTPAD(x) (((x) & GENMASK(8, 0)) << 12)
+#define ASPEED_JTAG_PADCTRL_PREPAD(x) (((x) & GENMASK(8, 0)) << 0)
+
+/* ASPEED_JTAG_SHCTRL: Shift Control */
+#define ASPEED_JTAG_SHCTRL_FRUN_TCK_EN BIT(31)
+#define ASPEED_JTAG_SHCTRL_STSHIFT_EN BIT(30)
+#define ASPEED_JTAG_SHCTRL_TMS(x) (((x) & GENMASK(13, 0)) << 16)
+#define ASPEED_JTAG_SHCTRL_POST_TMS(x) (((x) & GENMASK(3, 0)) << 13)
+#define ASPEED_JTAG_SHCTRL_PRE_TMS(x) (((x) & GENMASK(3, 0)) << 10)
+#define ASPEED_JTAG_SHCTRL_PAD_SEL0 (0)
+#define ASPEED_JTAG_SHCTRL_PAD_SEL1 BIT(9)
+#define ASPEED_JTAG_SHCTRL_END_SHIFT BIT(8)
+#define ASPEED_JTAG_SHCTRL_START_SHIFT BIT(7)
+#define ASPEED_JTAG_SHCTRL_LWRDT_SHIFT(x) ((x) & GENMASK(6, 0))
+
+#define ASPEED_JTAG_END_SHIFT_DISABLED 0
+
+/* ASPEED_JTAG_GBLCTRL : Global Control */
+#define ASPEED_JTAG_GBLCTRL_ENG_MODE_EN BIT(31)
+#define ASPEED_JTAG_GBLCTRL_ENG_OUT_EN BIT(30)
+#define ASPEED_JTAG_GBLCTRL_FORCE_TMS BIT(29)
+#define ASPEED_JTAG_GBLCTRL_SHIFT_COMPLETE BIT(28)
+#define ASPEED_JTAG_GBLCTRL_RESET_FIFO BIT(25)
+#define ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE BIT(24)
+#define ASPEED_JTAG_GBLCTRL_UPDT_SHIFT(x) (((x) & GENMASK(9, 7)) << 13)
+#define ASPEED_JTAG_GBLCTRL_STSHIFT(x) (((x) & GENMASK(0, 0)) << 16)
+#define ASPEED_JTAG_GBLCTRL_TRST BIT(15)
+#define ASPEED_JTAG_CLK_DIVISOR_MASK GENMASK(11, 0)
+#define ASPEED_JTAG_CLK_GET_DIV(x) ((x) & ASPEED_JTAG_CLK_DIVISOR_MASK)
+
+/* ASPEED_JTAG_INTCTRL: Interrupt Control */
+#define ASPEED_JTAG_INTCTRL_SHCPL_IRQ_EN BIT(16)
+#define ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT BIT(0)
+
+/* ASPEED_JTAG_STAT: JTAG HW mode 2 status */
+#define ASPEED_JTAG_STAT_ENG_IDLE BIT(0)
+
+#define ASPEED_JTAG_MAX_PAD_SIZE 512
+
+/* Microsecond delay used to work around the intensive R/W FIFO usage issue */
+#define AST26XX_FIFO_UDELAY 2
+
+/* Microsecond delay that allows the JTAG Master Controller to be programmed */
+#define AST26XX_JTAG_CTRL_UDELAY 2
+
+/*#define USE_INTERRUPTS*/
+#define DEBUG_JTAG
+
+static const char * const regnames[] = {
+ [ASPEED_JTAG_DATA] = "ASPEED_JTAG_DATA",
+ [ASPEED_JTAG_INST] = "ASPEED_JTAG_INST",
+ [ASPEED_JTAG_CTRL] = "ASPEED_JTAG_CTRL",
+ [ASPEED_JTAG_ISR] = "ASPEED_JTAG_ISR",
+ [ASPEED_JTAG_SW] = "ASPEED_JTAG_SW",
+ [ASPEED_JTAG_TCK] = "ASPEED_JTAG_TCK",
+ [ASPEED_JTAG_EC] = "ASPEED_JTAG_EC",
+ [ASPEED_JTAG_SHDATA] = "ASPEED_JTAG_SHDATA",
+ [ASPEED_JTAG_SHINST] = "ASPEED_JTAG_SHINST",
+ [ASPEED_JTAG_PADCTRL0] = "ASPEED_JTAG_PADCTRL0",
+ [ASPEED_JTAG_PADCTRL1] = "ASPEED_JTAG_PADCTRL1",
+ [ASPEED_JTAG_SHCTRL] = "ASPEED_JTAG_SHCTRL",
+ [ASPEED_JTAG_GBLCTRL] = "ASPEED_JTAG_GBLCTRL",
+ [ASPEED_JTAG_INTCTRL] = "ASPEED_JTAG_INTCTRL",
+ [ASPEED_JTAG_STAT] = "ASPEED_JTAG_STAT",
+};
+
+#define ASPEED_JTAG_NAME "jtag-aspeed"
+
+struct aspeed_jtag {
+ void __iomem *reg_base;
+ struct device *dev;
+ struct clk *pclk;
+ enum jtag_tapstate status;
+ int irq;
+ struct reset_control *rst;
+ u32 flag;
+ wait_queue_head_t jtag_wq;
+ u32 mode;
+ enum jtag_tapstate current_state;
+ const struct jtag_low_level_functions *llops;
+ u32 pad_data_one[ASPEED_JTAG_MAX_PAD_SIZE / 32];
+ u32 pad_data_zero[ASPEED_JTAG_MAX_PAD_SIZE / 32];
+};
+
+/*
+ * Multi-generation support is enabled by mapping the jtag_ops and low level
+ * aspeed functions per generation through the aspeed_jtag_functions struct,
+ * which acts as the configuration mechanism.
+ */
+
+struct jtag_low_level_functions {
+ void (*output_disable)(struct aspeed_jtag *aspeed_jtag);
+ void (*master_enable)(struct aspeed_jtag *aspeed_jtag);
+ int (*xfer_push_data)(struct aspeed_jtag *aspeed_jtag,
+ enum jtag_xfer_type type, u32 bits_len);
+ int (*xfer_push_data_last)(struct aspeed_jtag *aspeed_jtag,
+ enum jtag_xfer_type type, u32 bits_len);
+ void (*xfer_sw)(struct aspeed_jtag *aspeed_jtag, struct jtag_xfer *xfer,
+ u32 *data);
+ int (*xfer_hw)(struct aspeed_jtag *aspeed_jtag, struct jtag_xfer *xfer,
+ u32 *data);
+ void (*xfer_hw_fifo_delay)(void);
+ void (*xfer_sw_delay)(struct aspeed_jtag *aspeed_jtag);
+ irqreturn_t (*jtag_interrupt)(s32 this_irq, void *dev_id);
+};
+
+struct aspeed_jtag_functions {
+ const struct jtag_ops *aspeed_jtag_ops;
+ const struct jtag_low_level_functions *aspeed_jtag_llops;
+};
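+
+/*
+ * For illustration only: wiring up a hypothetical additional generation would
+ * need no more than a new low level function set plus a matching compatible
+ * entry (the astXXxx names below are placeholders, not part of this driver):
+ *
+ *	static const struct jtag_low_level_functions astXXxx_llops = {
+ *		.master_enable  = aspeed_jtag_master_xxxx,
+ *		.output_disable = aspeed_jtag_output_disable_xxxx,
+ *		...
+ *	};
+ *
+ *	static const struct aspeed_jtag_functions astXXxx_functions = {
+ *		.aspeed_jtag_ops   = &aspeed_jtag_ops_xxxx,
+ *		.aspeed_jtag_llops = &astXXxx_llops,
+ *	};
+ *
+ *	{ .compatible = "aspeed,astXXxx-jtag", .data = &astXXxx_functions },
+ */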
+
+#ifdef DEBUG_JTAG
+static char *end_status_str[] = { "tlr", "idle", "selDR", "capDR",
+ "sDR", "ex1DR", "pDR", "ex2DR",
+ "updDR", "selIR", "capIR", "sIR",
+ "ex1IR", "pIR", "ex2IR", "updIR" };
+#endif
+
+static u32 aspeed_jtag_read(struct aspeed_jtag *aspeed_jtag, u32 reg)
+{
+ u32 val = readl(aspeed_jtag->reg_base + reg);
+
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "read:%s val = 0x%08x\n", regnames[reg], val);
+#endif
+ return val;
+}
+
+static void aspeed_jtag_write(struct aspeed_jtag *aspeed_jtag, u32 val, u32 reg)
+{
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "write:%s val = 0x%08x\n", regnames[reg],
+ val);
+#endif
+ writel(val, aspeed_jtag->reg_base + reg);
+}
+
+static int aspeed_jtag_freq_set(struct jtag *jtag, u32 freq)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+ unsigned long apb_frq;
+ u32 tck_val;
+ u16 div;
+
+ if (!freq)
+ return -EINVAL;
+
+ apb_frq = clk_get_rate(aspeed_jtag->pclk);
+ if (!apb_frq)
+ return -EOPNOTSUPP;
+
+ div = (apb_frq - 1) / freq;
+ tck_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_TCK);
+ aspeed_jtag_write(aspeed_jtag,
+ (tck_val & ~ASPEED_JTAG_TCK_DIVISOR_MASK) | div,
+ ASPEED_JTAG_TCK);
+ return 0;
+}
+
+static int aspeed_jtag_freq_set_26xx(struct jtag *jtag, u32 freq)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+ unsigned long apb_frq;
+ u32 tck_val;
+ u16 div;
+
+ if (!freq)
+ return -EINVAL;
+
+ apb_frq = clk_get_rate(aspeed_jtag->pclk);
+ if (!apb_frq)
+ return -EOPNOTSUPP;
+
+ div = (apb_frq - 1) / freq;
+ tck_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL);
+ aspeed_jtag_write(aspeed_jtag,
+ (tck_val & ~ASPEED_JTAG_CLK_DIVISOR_MASK) | div,
+ ASPEED_JTAG_GBLCTRL);
+ return 0;
+}
+
+static int aspeed_jtag_freq_get(struct jtag *jtag, u32 *frq)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+ u32 pclk;
+ u32 tck;
+
+ pclk = clk_get_rate(aspeed_jtag->pclk);
+ tck = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_TCK);
+ *frq = pclk / (ASPEED_JTAG_TCK_GET_DIV(tck) + 1);
+
+ return 0;
+}
+
+static int aspeed_jtag_freq_get_26xx(struct jtag *jtag, u32 *frq)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+ u32 pclk;
+ u32 tck;
+
+ pclk = clk_get_rate(aspeed_jtag->pclk);
+ tck = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL);
+ *frq = pclk / (ASPEED_JTAG_CLK_GET_DIV(tck) + 1);
+
+ return 0;
+}
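+
+/*
+ * Worked example of the divisor arithmetic above (illustrative numbers only):
+ * with a 50 MHz APB clock and a requested 1 MHz TCK,
+ * div = (50000000 - 1) / 1000000 = 49 and the resulting TCK is
+ * 50000000 / (49 + 1) = 1 MHz. For a requested 3 MHz, div = 16 and the
+ * resulting TCK is 50000000 / 17, roughly 2.94 MHz, so the integer rounding
+ * never produces a TCK faster than requested.
+ */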
+
+static inline void aspeed_jtag_output_disable(struct aspeed_jtag *aspeed_jtag)
+{
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL);
+}
+
+static inline void
+aspeed_jtag_output_disable_26xx(struct aspeed_jtag *aspeed_jtag)
+{
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL);
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_GBLCTRL);
+}
+
+static inline void aspeed_jtag_master(struct aspeed_jtag *aspeed_jtag)
+{
+ aspeed_jtag_write(aspeed_jtag,
+ (ASPEED_JTAG_CTL_ENG_EN | ASPEED_JTAG_CTL_ENG_OUT_EN),
+ ASPEED_JTAG_CTRL);
+
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_SW_MODE_EN | ASPEED_JTAG_SW_MODE_TDIO,
+ ASPEED_JTAG_SW);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_ISR_INST_PAUSE |
+ ASPEED_JTAG_ISR_INST_COMPLETE |
+ ASPEED_JTAG_ISR_DATA_PAUSE |
+ ASPEED_JTAG_ISR_DATA_COMPLETE |
+ ASPEED_JTAG_ISR_INST_PAUSE_EN |
+ ASPEED_JTAG_ISR_INST_COMPLETE_EN |
+ ASPEED_JTAG_ISR_DATA_PAUSE_EN |
+ ASPEED_JTAG_ISR_DATA_COMPLETE_EN,
+ ASPEED_JTAG_ISR); /* Enable Interrupt */
+}
+
+static inline void aspeed_jtag_master_26xx(struct aspeed_jtag *aspeed_jtag)
+{
+ if (aspeed_jtag->mode & JTAG_XFER_HW_MODE) {
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL);
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_GBLCTRL_ENG_MODE_EN |
+ ASPEED_JTAG_GBLCTRL_ENG_OUT_EN,
+ ASPEED_JTAG_GBLCTRL);
+ } else {
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_GBLCTRL);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_CTL_ENG_EN |
+ ASPEED_JTAG_CTL_ENG_OUT_EN,
+ ASPEED_JTAG_CTRL);
+
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_SW_MODE_EN |
+ ASPEED_JTAG_SW_MODE_TDIO,
+ ASPEED_JTAG_SW);
+ }
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_EN |
+ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT,
+ ASPEED_JTAG_INTCTRL); /* Enable HW2 IRQ */
+
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_ISR_INST_PAUSE |
+ ASPEED_JTAG_ISR_INST_COMPLETE |
+ ASPEED_JTAG_ISR_DATA_PAUSE |
+ ASPEED_JTAG_ISR_DATA_COMPLETE |
+ ASPEED_JTAG_ISR_INST_PAUSE_EN |
+ ASPEED_JTAG_ISR_INST_COMPLETE_EN |
+ ASPEED_JTAG_ISR_DATA_PAUSE_EN |
+ ASPEED_JTAG_ISR_DATA_COMPLETE_EN,
+ ASPEED_JTAG_ISR); /* Enable HW1 Interrupts */
+}
+
+static int aspeed_jtag_mode_set(struct jtag *jtag, struct jtag_mode *jtag_mode)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+
+ switch (jtag_mode->feature) {
+ case JTAG_XFER_MODE:
+ aspeed_jtag->mode = jtag_mode->mode;
+ aspeed_jtag->llops->master_enable(aspeed_jtag);
+ break;
+ case JTAG_CONTROL_MODE:
+ if (jtag_mode->mode == JTAG_MASTER_OUTPUT_DISABLE)
+ aspeed_jtag->llops->output_disable(aspeed_jtag);
+ else if (jtag_mode->mode == JTAG_MASTER_MODE)
+ aspeed_jtag->llops->master_enable(aspeed_jtag);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * We read and write from an unused JTAG Master controller register in SW
+ * mode to create a delay in xfers.
+ * We found this mechanism better than any udelay or usleep option.
+ */
+static inline void aspeed_jtag_sw_delay_26xx(struct aspeed_jtag *aspeed_jtag)
+{
+ u32 read_reg = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_PADCTRL1);
+
+ aspeed_jtag_write(aspeed_jtag, read_reg, ASPEED_JTAG_PADCTRL1);
+}
+
+static char aspeed_jtag_tck_cycle(struct aspeed_jtag *aspeed_jtag, u8 tms,
+ u8 tdi)
+{
+ char tdo = 0;
+
+ /* TCK = 0 */
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_SW_MODE_EN |
+ (tms * ASPEED_JTAG_SW_MODE_TMS) |
+ (tdi * ASPEED_JTAG_SW_MODE_TDIO),
+ ASPEED_JTAG_SW);
+
+ /* Wait until JTAG Master controller finishes the operation */
+ if (aspeed_jtag->llops->xfer_sw_delay)
+ aspeed_jtag->llops->xfer_sw_delay(aspeed_jtag);
+ else
+ aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_SW);
+
+ /* TCK = 1 */
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_SW_MODE_EN | ASPEED_JTAG_SW_MODE_TCK |
+ (tms * ASPEED_JTAG_SW_MODE_TMS) |
+ (tdi * ASPEED_JTAG_SW_MODE_TDIO),
+ ASPEED_JTAG_SW);
+
+ /* Wait until JTAG Master controller finishes the operation */
+ if (aspeed_jtag->llops->xfer_sw_delay)
+ aspeed_jtag->llops->xfer_sw_delay(aspeed_jtag);
+
+ if (aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_SW) &
+ ASPEED_JTAG_SW_MODE_TDIO)
+ tdo = 1;
+
+ return tdo;
+}
+
+static int aspeed_jtag_bitbang(struct jtag *jtag,
+ struct bitbang_packet *bitbang,
+ struct tck_bitbang *bitbang_data)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+ int i = 0;
+
+ for (i = 0; i < bitbang->length; i++) {
+ bitbang_data[i].tdo =
+ aspeed_jtag_tck_cycle(aspeed_jtag, bitbang_data[i].tms,
+ bitbang_data[i].tdi);
+ }
+ return 0;
+}
+
+static inline void aspeed_jtag_xfer_hw_fifo_delay_26xx(void)
+{
+ udelay(AST26XX_FIFO_UDELAY);
+}
+
+static int aspeed_jtag_isr_wait(struct aspeed_jtag *aspeed_jtag, u32 bit)
+{
+ int res = 0;
+#ifdef USE_INTERRUPTS
+ res = wait_event_interruptible(aspeed_jtag->jtag_wq,
+ aspeed_jtag->flag & bit);
+ aspeed_jtag->flag &= ~bit;
+#else
+ u32 status = 0;
+ u32 iterations = 0;
+
+ while ((status & bit) == 0) {
+ status = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_ISR);
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "%s = 0x%08x\n", __func__, status);
+#endif
+ iterations++;
+ if (iterations > WAIT_ITERATIONS) {
+ dev_err(aspeed_jtag->dev, "%s %d in ASPEED_JTAG_ISR\n",
+ "aspeed_jtag driver timed out waiting for bit",
+ bit);
+ res = -EFAULT;
+ break;
+ }
+ if ((status & ASPEED_JTAG_ISR_DATA_COMPLETE) == 0) {
+ if (iterations % 25 == 0)
+ usleep_range(1, 5);
+ else
+ udelay(1);
+ }
+ }
+ aspeed_jtag_write(aspeed_jtag, bit | (status & 0xf), ASPEED_JTAG_ISR);
+#endif
+ return res;
+}
+
+static int aspeed_jtag_wait_shift_complete(struct aspeed_jtag *aspeed_jtag)
+{
+ int res = 0;
+#ifdef USE_INTERRUPTS
+ res = wait_event_interruptible(aspeed_jtag->jtag_wq,
+ aspeed_jtag->flag &
+ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT);
+ aspeed_jtag->flag &= ~ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT;
+#else
+ u32 status = 0;
+ u32 iterations = 0;
+
+ while ((status & ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT) == 0) {
+ status = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_INTCTRL);
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "%s = 0x%08x\n", __func__, status);
+#endif
+ iterations++;
+ if (iterations > WAIT_ITERATIONS) {
+ dev_err(aspeed_jtag->dev,
+ "aspeed_jtag driver timed out waiting for shift completed\n");
+ res = -EFAULT;
+ break;
+ }
+ if (iterations % 25 == 0)
+ usleep_range(1, 5);
+ else
+ udelay(1);
+ }
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT |
+ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_EN,
+ ASPEED_JTAG_INTCTRL);
+#endif
+ return res;
+}
+
+static void aspeed_jtag_set_tap_state(struct aspeed_jtag *aspeed_jtag,
+ enum jtag_tapstate from_state,
+ enum jtag_tapstate end_state)
+{
+ int i = 0;
+ enum jtag_tapstate from, to;
+
+ from = from_state;
+ to = end_state;
+
+ if (from == JTAG_STATE_CURRENT)
+ from = aspeed_jtag->status;
+
+ for (i = 0; i < _tms_cycle_lookup[from][to].count; i++)
+ aspeed_jtag_tck_cycle(aspeed_jtag,
+ ((_tms_cycle_lookup[from][to].tmsbits
+ >> i) & 0x1), 0);
+ aspeed_jtag->current_state = end_state;
+}
+
+static void aspeed_jtag_set_tap_state_sw(struct aspeed_jtag *aspeed_jtag,
+ struct jtag_tap_state *tapstate)
+{
+	/* SW mode: go from the current tap state to end_state */
+ if (tapstate->reset) {
+ int i = 0;
+
+ for (i = 0; i < ASPEED_JTAG_RESET_CNTR; i++)
+ aspeed_jtag_tck_cycle(aspeed_jtag, 1, 0);
+ aspeed_jtag->current_state = JTAG_STATE_TLRESET;
+ }
+
+ aspeed_jtag_set_tap_state(aspeed_jtag, tapstate->from,
+ tapstate->endstate);
+}
+
+static int aspeed_jtag_status_set(struct jtag *jtag,
+ struct jtag_tap_state *tapstate)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "Set TAP state: %s\n",
+ end_status_str[tapstate->endstate]);
+#endif
+
+ if (!(aspeed_jtag->mode & JTAG_XFER_HW_MODE)) {
+ aspeed_jtag_set_tap_state_sw(aspeed_jtag, tapstate);
+ return 0;
+ }
+
+ /* x TMS high + 1 TMS low */
+ if (tapstate->reset) {
+ /* Disable sw mode */
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW);
+ mdelay(1);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_CTL_ENG_EN |
+ ASPEED_JTAG_CTL_ENG_OUT_EN |
+ ASPEED_JTAG_CTL_FORCE_TMS,
+ ASPEED_JTAG_CTRL);
+ mdelay(1);
+ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_SW_TDIO,
+ ASPEED_JTAG_SW);
+ aspeed_jtag->current_state = JTAG_STATE_TLRESET;
+ }
+
+ return 0;
+}
+
+static void aspeed_jtag_shctrl_tms_mask(enum jtag_tapstate from,
+ enum jtag_tapstate to,
+ enum jtag_tapstate there,
+ enum jtag_tapstate endstate,
+ u32 start_shift, u32 end_shift,
+ u32 *tms_mask)
+{
+ u32 pre_tms = start_shift ? _tms_cycle_lookup[from][to].count : 0;
+ u32 post_tms = end_shift ? _tms_cycle_lookup[there][endstate].count : 0;
+ u32 tms_value = start_shift ? _tms_cycle_lookup[from][to].tmsbits : 0;
+
+ tms_value |= end_shift ? _tms_cycle_lookup[there][endstate].tmsbits
+ << pre_tms :
+ 0;
+ *tms_mask = start_shift | ASPEED_JTAG_SHCTRL_PRE_TMS(pre_tms) |
+ end_shift | ASPEED_JTAG_SHCTRL_POST_TMS(post_tms) |
+ ASPEED_JTAG_SHCTRL_TMS(tms_value);
+}
+
+static void aspeed_jtag_set_tap_state_hw2(struct aspeed_jtag *aspeed_jtag,
+ struct jtag_tap_state *tapstate)
+{
+ u32 reg_val;
+
+ /* x TMS high + 1 TMS low */
+ if (tapstate->reset) {
+ /* Disable sw mode */
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW);
+ udelay(AST26XX_JTAG_CTRL_UDELAY);
+ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL);
+ aspeed_jtag_write(aspeed_jtag,
+ reg_val | ASPEED_JTAG_GBLCTRL_ENG_MODE_EN |
+ ASPEED_JTAG_GBLCTRL_ENG_OUT_EN |
+ ASPEED_JTAG_GBLCTRL_RESET_FIFO |
+ ASPEED_JTAG_GBLCTRL_FORCE_TMS,
+ ASPEED_JTAG_GBLCTRL);
+ udelay(AST26XX_JTAG_CTRL_UDELAY);
+ aspeed_jtag->current_state = JTAG_STATE_TLRESET;
+ } else if (tapstate->endstate == JTAG_STATE_IDLE &&
+ aspeed_jtag->current_state != JTAG_STATE_IDLE) {
+ /* Always go to RTI, do not wait for shift operation */
+ aspeed_jtag_set_tap_state(aspeed_jtag,
+ aspeed_jtag->current_state,
+ JTAG_STATE_IDLE);
+ aspeed_jtag->current_state = JTAG_STATE_IDLE;
+ }
+}
+
+static int aspeed_jtag_status_set_26xx(struct jtag *jtag,
+ struct jtag_tap_state *tapstate)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "Set TAP state: status %s from %s to %s\n",
+ end_status_str[aspeed_jtag->current_state],
+ end_status_str[tapstate->from],
+ end_status_str[tapstate->endstate]);
+#endif
+
+ if (!(aspeed_jtag->mode & JTAG_XFER_HW_MODE)) {
+ aspeed_jtag_set_tap_state_sw(aspeed_jtag, tapstate);
+ return 0;
+ }
+
+ aspeed_jtag_set_tap_state_hw2(aspeed_jtag, tapstate);
+ return 0;
+}
+
+static void aspeed_jtag_xfer_sw(struct aspeed_jtag *aspeed_jtag,
+ struct jtag_xfer *xfer, u32 *data)
+{
+ unsigned long remain_xfer = xfer->length;
+ unsigned long shift_bits = 0;
+ unsigned long index = 0;
+ unsigned long tdi;
+ char tdo;
+
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "SW JTAG SHIFT %s, length = %d\n",
+ (xfer->type == JTAG_SIR_XFER) ? "IR" : "DR", xfer->length);
+#endif
+
+ if (xfer->type == JTAG_SIR_XFER)
+ aspeed_jtag_set_tap_state(aspeed_jtag, xfer->from,
+ JTAG_STATE_SHIFTIR);
+ else
+ aspeed_jtag_set_tap_state(aspeed_jtag, xfer->from,
+ JTAG_STATE_SHIFTDR);
+
+ tdi = ASPEED_JTAG_GET_TDI(xfer->direction, data[index]);
+ data[index] = 0;
+ while (remain_xfer > 1) {
+ tdo = aspeed_jtag_tck_cycle(aspeed_jtag, 0,
+ tdi & ASPEED_JTAG_DATA_MSB);
+ data[index] |= tdo
+ << (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE);
+ tdi >>= 1;
+ shift_bits++;
+ remain_xfer--;
+
+ if (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE == 0) {
+ tdo = 0;
+ index++;
+ tdi = ASPEED_JTAG_GET_TDI(xfer->direction, data[index]);
+ data[index] = 0;
+ }
+ }
+
+ if ((xfer->endstate == (xfer->type == JTAG_SIR_XFER ?
+ JTAG_STATE_SHIFTIR :
+ JTAG_STATE_SHIFTDR))) {
+ /* Stay in Shift IR/DR*/
+ tdo = aspeed_jtag_tck_cycle(aspeed_jtag, 0,
+ tdi & ASPEED_JTAG_DATA_MSB);
+ data[index] |= tdo
+ << (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE);
+ } else {
+ /* Goto end state */
+ tdo = aspeed_jtag_tck_cycle(aspeed_jtag, 1,
+ tdi & ASPEED_JTAG_DATA_MSB);
+ data[index] |= tdo
+ << (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE);
+ aspeed_jtag->status = (xfer->type == JTAG_SIR_XFER) ?
+ JTAG_STATE_EXIT1IR :
+ JTAG_STATE_EXIT1DR;
+ aspeed_jtag_set_tap_state(aspeed_jtag, aspeed_jtag->status,
+ xfer->endstate);
+ }
+}
+
+static int aspeed_jtag_xfer_push_data_26xx(struct aspeed_jtag *aspeed_jtag,
+ enum jtag_xfer_type type,
+ u32 bits_len)
+{
+ int res = 0;
+
+ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_TRANS_LEN(bits_len),
+ ASPEED_JTAG_CTRL);
+ if (type == JTAG_SIR_XFER) {
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_TRANS_LEN(bits_len) |
+ ASPEED_JTAG_CTL_26XX_INST_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_INST_PAUSE);
+ } else {
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_TRANS_LEN(bits_len) |
+ ASPEED_JTAG_CTL_DATA_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_DATA_PAUSE);
+ }
+ return res;
+}
+
+static int aspeed_jtag_xfer_push_data(struct aspeed_jtag *aspeed_jtag,
+ enum jtag_xfer_type type, u32 bits_len)
+{
+ int res = 0;
+
+ if (type == JTAG_SIR_XFER) {
+ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_IOUT_LEN(bits_len),
+ ASPEED_JTAG_CTRL);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_IOUT_LEN(bits_len) |
+ ASPEED_JTAG_CTL_INST_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_INST_PAUSE);
+ } else {
+ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_DOUT_LEN(bits_len),
+ ASPEED_JTAG_CTRL);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_DOUT_LEN(bits_len) |
+ ASPEED_JTAG_CTL_DATA_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_DATA_PAUSE);
+ }
+ return res;
+}
+
+static int aspeed_jtag_xfer_push_data_last_26xx(struct aspeed_jtag *aspeed_jtag,
+ enum jtag_xfer_type type,
+ u32 shift_bits)
+{
+ int res = 0;
+
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_TRANS_LEN(shift_bits) |
+ ASPEED_JTAG_CTL_26XX_LASPEED_TRANS,
+ ASPEED_JTAG_CTRL);
+ if (type == JTAG_SIR_XFER) {
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_TRANS_LEN(shift_bits) |
+ ASPEED_JTAG_CTL_26XX_LASPEED_TRANS |
+ ASPEED_JTAG_CTL_26XX_INST_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_INST_COMPLETE);
+ } else {
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_TRANS_LEN(shift_bits) |
+ ASPEED_JTAG_CTL_26XX_LASPEED_TRANS |
+ ASPEED_JTAG_CTL_DATA_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_DATA_COMPLETE);
+ }
+ return res;
+}
+
+static int aspeed_jtag_xfer_push_data_last(struct aspeed_jtag *aspeed_jtag,
+ enum jtag_xfer_type type,
+ u32 shift_bits)
+{
+ int res = 0;
+
+ if (type == JTAG_SIR_XFER) {
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_IOUT_LEN(shift_bits) |
+ ASPEED_JTAG_CTL_LASPEED_INST,
+ ASPEED_JTAG_CTRL);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_IOUT_LEN(shift_bits) |
+ ASPEED_JTAG_CTL_LASPEED_INST |
+ ASPEED_JTAG_CTL_INST_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_INST_COMPLETE);
+ } else {
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_DOUT_LEN(shift_bits) |
+ ASPEED_JTAG_CTL_LASPEED_DATA,
+ ASPEED_JTAG_CTRL);
+ aspeed_jtag_write(aspeed_jtag,
+ ASPEED_JTAG_DOUT_LEN(shift_bits) |
+ ASPEED_JTAG_CTL_LASPEED_DATA |
+ ASPEED_JTAG_CTL_DATA_EN,
+ ASPEED_JTAG_CTRL);
+ res = aspeed_jtag_isr_wait(aspeed_jtag,
+ ASPEED_JTAG_ISR_DATA_COMPLETE);
+ }
+ return res;
+}
+
+static int aspeed_jtag_xfer_hw(struct aspeed_jtag *aspeed_jtag,
+ struct jtag_xfer *xfer, u32 *data)
+{
+ unsigned long remain_xfer = xfer->length;
+ unsigned long index = 0;
+ char shift_bits;
+ u32 data_reg;
+ u32 scan_end;
+ union pad_config padding;
+ int retval = 0;
+
+ padding.int_value = xfer->padding;
+
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev, "HW JTAG SHIFT %s, length = %d pad = 0x%x\n",
+ (xfer->type == JTAG_SIR_XFER) ? "IR" : "DR", xfer->length,
+ xfer->padding);
+#endif
+ data_reg = xfer->type == JTAG_SIR_XFER ? ASPEED_JTAG_INST :
+ ASPEED_JTAG_DATA;
+ if (xfer->endstate == JTAG_STATE_SHIFTIR ||
+ xfer->endstate == JTAG_STATE_SHIFTDR ||
+ xfer->endstate == JTAG_STATE_PAUSEIR ||
+ xfer->endstate == JTAG_STATE_PAUSEDR) {
+ scan_end = 0;
+ } else {
+ if (padding.post_pad_number)
+ scan_end = 0;
+ else
+ scan_end = 1;
+ }
+
+ /* Perform pre padding */
+ if (padding.pre_pad_number) {
+ struct jtag_xfer pre_xfer = {
+ .type = xfer->type,
+ .direction = JTAG_WRITE_XFER,
+ .from = xfer->from,
+ .endstate = xfer->type == JTAG_SIR_XFER ?
+ JTAG_STATE_SHIFTIR : JTAG_STATE_SHIFTDR,
+ .padding = 0,
+ .length = padding.pre_pad_number,
+ };
+ if (padding.pre_pad_number > ASPEED_JTAG_MAX_PAD_SIZE)
+ return -EINVAL;
+ retval = aspeed_jtag_xfer_hw(aspeed_jtag, &pre_xfer,
+ padding.pad_data ?
+ aspeed_jtag->pad_data_one :
+ aspeed_jtag->pad_data_zero);
+ if (retval)
+ return retval;
+ }
+
+ while (remain_xfer) {
+ if (xfer->direction & JTAG_WRITE_XFER)
+ aspeed_jtag_write(aspeed_jtag, data[index], data_reg);
+ else
+ aspeed_jtag_write(aspeed_jtag, 0, data_reg);
+ if (aspeed_jtag->llops->xfer_hw_fifo_delay)
+ aspeed_jtag->llops->xfer_hw_fifo_delay();
+
+ if (remain_xfer > ASPEED_JTAG_DATA_CHUNK_SIZE) {
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev,
+ "Chunk len=%d chunk_size=%d remain_xfer=%lu\n",
+ xfer->length, ASPEED_JTAG_DATA_CHUNK_SIZE,
+ remain_xfer);
+#endif
+ shift_bits = ASPEED_JTAG_DATA_CHUNK_SIZE;
+
+ /*
+			 * Transmit a chunk that does not complete the transfer
+			 * and go to Pause IR/DR afterwards.
+ */
+ if (aspeed_jtag->llops->xfer_push_data(aspeed_jtag,
+ xfer->type,
+ shift_bits)
+ != 0) {
+ return -EFAULT;
+ }
+ } else {
+ /*
+			 * Shift the remaining bits, which fit within the column length.
+ */
+ shift_bits = remain_xfer;
+ if (scan_end) {
+ /*
+ * If this data is the end of the transmission
+ * send remaining bits and go to endstate
+ */
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev,
+ "Last len=%d chunk_size=%d remain_xfer=%lu\n",
+ xfer->length,
+ ASPEED_JTAG_DATA_CHUNK_SIZE,
+ remain_xfer);
+#endif
+ if (aspeed_jtag->llops->xfer_push_data_last(
+ aspeed_jtag, xfer->type,
+ shift_bits) != 0) {
+ return -EFAULT;
+ }
+ } else {
+ /*
+ * If transmission is waiting for additional
+ * data send remaining bits and then go to
+ * Pause IR/DR.
+ */
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev,
+ "Tail len=%d chunk_size=%d remain_xfer=%lu\n",
+ xfer->length,
+ ASPEED_JTAG_DATA_CHUNK_SIZE,
+ remain_xfer);
+#endif
+ if (aspeed_jtag->llops->xfer_push_data(
+ aspeed_jtag, xfer->type,
+ shift_bits) != 0) {
+ return -EFAULT;
+ }
+ }
+ }
+
+ if (xfer->direction & JTAG_READ_XFER) {
+ if (shift_bits < ASPEED_JTAG_DATA_CHUNK_SIZE) {
+ data[index] =
+ aspeed_jtag_read(aspeed_jtag, data_reg);
+
+ data[index] >>= ASPEED_JTAG_DATA_CHUNK_SIZE -
+ shift_bits;
+ } else {
+ data[index] =
+ aspeed_jtag_read(aspeed_jtag, data_reg);
+ }
+ if (aspeed_jtag->llops->xfer_hw_fifo_delay)
+ aspeed_jtag->llops->xfer_hw_fifo_delay();
+ }
+
+ remain_xfer = remain_xfer - shift_bits;
+ index++;
+ }
+
+ /* Perform post padding */
+ if (padding.post_pad_number) {
+ struct jtag_xfer post_xfer = {
+ .type = xfer->type,
+ .direction = JTAG_WRITE_XFER,
+ .from = xfer->from,
+ .endstate = xfer->endstate,
+ .padding = 0,
+ .length = padding.post_pad_number,
+ };
+ if (padding.post_pad_number > ASPEED_JTAG_MAX_PAD_SIZE)
+ return -EINVAL;
+ retval = aspeed_jtag_xfer_hw(aspeed_jtag, &post_xfer,
+ padding.pad_data ?
+ aspeed_jtag->pad_data_one :
+ aspeed_jtag->pad_data_zero);
+ if (retval)
+ return retval;
+ }
+ return 0;
+}
+
+static int aspeed_jtag_xfer(struct jtag *jtag, struct jtag_xfer *xfer,
+ u8 *xfer_data)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+
+ if (!(aspeed_jtag->mode & JTAG_XFER_HW_MODE)) {
+ /* SW mode */
+ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_SW_TDIO,
+ ASPEED_JTAG_SW);
+
+ aspeed_jtag->llops->xfer_sw(aspeed_jtag, xfer,
+ (u32 *)xfer_data);
+ } else {
+ /* HW mode */
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW);
+ if (aspeed_jtag->llops->xfer_hw(aspeed_jtag, xfer,
+ (u32 *)xfer_data) != 0)
+ return -EFAULT;
+ }
+
+ aspeed_jtag->status = xfer->endstate;
+ return 0;
+}
+
+static int aspeed_jtag_xfer_hw2(struct aspeed_jtag *aspeed_jtag,
+ struct jtag_xfer *xfer, u32 *data)
+{
+ unsigned long remain_xfer = xfer->length;
+ unsigned long partial_xfer_size = 0;
+ unsigned long index = 0;
+ u32 shift_bits;
+ u32 data_reg;
+ u32 reg_val;
+ enum jtag_tapstate shift;
+ enum jtag_tapstate exit;
+ enum jtag_tapstate exitx;
+ enum jtag_tapstate pause;
+ enum jtag_tapstate endstate;
+ u32 start_shift;
+ u32 end_shift;
+ u32 tms_mask;
+
+ if (xfer->type == JTAG_SIR_XFER) {
+ data_reg = ASPEED_JTAG_SHDATA;
+ shift = JTAG_STATE_SHIFTIR;
+ pause = JTAG_STATE_PAUSEIR;
+ exit = JTAG_STATE_EXIT1IR;
+ exitx = JTAG_STATE_EXIT1DR;
+ } else {
+ data_reg = ASPEED_JTAG_SHDATA;
+ shift = JTAG_STATE_SHIFTDR;
+ pause = JTAG_STATE_PAUSEDR;
+ exit = JTAG_STATE_EXIT1DR;
+ exitx = JTAG_STATE_EXIT1IR;
+ }
+#ifdef DEBUG_JTAG
+ dev_dbg(aspeed_jtag->dev,
+ "HW2 JTAG SHIFT %s, length %d status %s from %s to %s then %s pad 0x%x\n",
+ (xfer->type == JTAG_SIR_XFER) ? "IR" : "DR", xfer->length,
+ end_status_str[aspeed_jtag->current_state],
+ end_status_str[xfer->from],
+ end_status_str[shift],
+ end_status_str[xfer->endstate], xfer->padding);
+#endif
+
+ if (aspeed_jtag->current_state == shift) {
+ start_shift = 0;
+ } else if (aspeed_jtag->current_state == JTAG_STATE_IDLE ||
+ aspeed_jtag->current_state == JTAG_STATE_TLRESET ||
+ aspeed_jtag->current_state == pause ||
+ aspeed_jtag->current_state == exit ||
+ aspeed_jtag->current_state == exitx) {
+ start_shift = ASPEED_JTAG_SHCTRL_START_SHIFT;
+ } else {
+ return -EINVAL;
+ }
+
+ if (xfer->endstate == shift) {
+ /*
+ * In the case of shifting 1 bit of data and attempting to stay
+ * in the SHIFT state, the AST2600 JTAG Master Controller in
+ * Hardware mode 2 has been observed to go to EXIT1 IR/DR
+ * instead of staying in the SHIFT IR/DR state. The following
+ * code special cases this one bit shift and directs the state
+ * machine to go to the PAUSE IR/DR state instead.
+ * Alternatively, the application making driver calls can avoid
+ * this situation as follows:
+ * 1.) Bundle all of the shift bits together into one call
+ * AND/OR
+ * 2.) Direct all partial shifts to move to the PAUSE-IR/DR
+ * state.
+ */
+ if (xfer->length == 1) {
+#ifdef DEBUG_JTAG
+ dev_warn(aspeed_jtag->dev, "JTAG Silicon WA: going to pause instead of shift");
+#endif
+ end_shift = ASPEED_JTAG_SHCTRL_END_SHIFT;
+ endstate = pause;
+ } else {
+ end_shift = 0;
+ endstate = shift;
+ }
+ } else if (xfer->endstate == exit) {
+ endstate = exit;
+ end_shift = ASPEED_JTAG_SHCTRL_END_SHIFT;
+ } else if (xfer->endstate == JTAG_STATE_IDLE) {
+ endstate = JTAG_STATE_IDLE;
+ end_shift = ASPEED_JTAG_SHCTRL_END_SHIFT;
+ } else if (xfer->endstate == pause) {
+ endstate = pause;
+ end_shift = ASPEED_JTAG_SHCTRL_END_SHIFT;
+ } else {
+ return -EINVAL;
+ }
+
+ aspeed_jtag_write(aspeed_jtag, xfer->padding, ASPEED_JTAG_PADCTRL0);
+
+ while (remain_xfer) {
+ unsigned long partial_xfer;
+ unsigned long partial_index;
+
+ if (remain_xfer > ASPEED_JTAG_HW2_DATA_CHUNK_SIZE)
+ partial_xfer_size = ASPEED_JTAG_HW2_DATA_CHUNK_SIZE;
+ else
+ partial_xfer_size = remain_xfer;
+
+ partial_index = index;
+ partial_xfer = partial_xfer_size;
+
+ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL);
+ aspeed_jtag_write(aspeed_jtag, reg_val |
+ ASPEED_JTAG_GBLCTRL_RESET_FIFO,
+ ASPEED_JTAG_GBLCTRL);
+
+ /* Switch internal FIFO into CPU mode */
+		reg_val &= ~ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE;
+ aspeed_jtag_write(aspeed_jtag, reg_val,
+ ASPEED_JTAG_GBLCTRL);
+
+ while (partial_xfer) {
+ if (partial_xfer > ASPEED_JTAG_DATA_CHUNK_SIZE)
+ shift_bits = ASPEED_JTAG_DATA_CHUNK_SIZE;
+ else
+ shift_bits = partial_xfer;
+
+ if (xfer->direction & JTAG_WRITE_XFER)
+ aspeed_jtag_write(aspeed_jtag,
+ data[partial_index++],
+ data_reg);
+ else
+ aspeed_jtag_write(aspeed_jtag, 0, data_reg);
+ if (aspeed_jtag->llops->xfer_hw_fifo_delay)
+ aspeed_jtag->llops->xfer_hw_fifo_delay();
+ partial_xfer = partial_xfer - shift_bits;
+ }
+ if (remain_xfer > ASPEED_JTAG_HW2_DATA_CHUNK_SIZE) {
+ shift_bits = ASPEED_JTAG_HW2_DATA_CHUNK_SIZE;
+
+ /*
+			 * Transmit a chunk that does not complete the transfer
+			 * and go to Pause IR/DR afterwards.
+ */
+
+ aspeed_jtag_shctrl_tms_mask(aspeed_jtag->current_state,
+ shift, exit, endstate,
+ start_shift, 0, &tms_mask);
+
+ reg_val = aspeed_jtag_read(aspeed_jtag,
+ ASPEED_JTAG_GBLCTRL);
+ reg_val = reg_val & ~(GENMASK(22, 20));
+ aspeed_jtag_write(aspeed_jtag, reg_val |
+ ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE |
+ ASPEED_JTAG_GBLCTRL_UPDT_SHIFT(
+ shift_bits),
+ ASPEED_JTAG_GBLCTRL);
+
+ aspeed_jtag_write(aspeed_jtag, tms_mask |
+ ASPEED_JTAG_SHCTRL_LWRDT_SHIFT(shift_bits),
+ ASPEED_JTAG_SHCTRL);
+ aspeed_jtag_wait_shift_complete(aspeed_jtag);
+ } else {
+ /*
+			 * Shift the remaining bits, which fit within the column length.
+ */
+ shift_bits = remain_xfer;
+ aspeed_jtag_shctrl_tms_mask(aspeed_jtag->current_state,
+ shift, exit, endstate,
+ start_shift, end_shift,
+ &tms_mask);
+
+ reg_val = aspeed_jtag_read(aspeed_jtag,
+ ASPEED_JTAG_GBLCTRL);
+ reg_val = reg_val & ~(GENMASK(22, 20));
+ aspeed_jtag_write(aspeed_jtag, reg_val |
+ ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE |
+ ASPEED_JTAG_GBLCTRL_UPDT_SHIFT(
+ shift_bits),
+ ASPEED_JTAG_GBLCTRL);
+
+ aspeed_jtag_write(aspeed_jtag, tms_mask |
+ ASPEED_JTAG_SHCTRL_LWRDT_SHIFT(
+ shift_bits),
+ ASPEED_JTAG_SHCTRL);
+
+ aspeed_jtag_wait_shift_complete(aspeed_jtag);
+ }
+
+ partial_index = index;
+ partial_xfer = partial_xfer_size;
+ while (partial_xfer) {
+ if (partial_xfer >
+ ASPEED_JTAG_DATA_CHUNK_SIZE) {
+ shift_bits =
+ ASPEED_JTAG_DATA_CHUNK_SIZE;
+ data[partial_index++] =
+ aspeed_jtag_read(aspeed_jtag,
+ data_reg);
+
+ } else {
+ shift_bits = partial_xfer;
+ data[partial_index++] =
+ aspeed_jtag_read(aspeed_jtag,
+ data_reg);
+ }
+ if (aspeed_jtag->llops->xfer_hw_fifo_delay)
+ aspeed_jtag->llops->xfer_hw_fifo_delay();
+ partial_xfer = partial_xfer - shift_bits;
+ }
+
+ remain_xfer = remain_xfer - partial_xfer_size;
+ index = partial_index;
+ start_shift = 0;
+ }
+ aspeed_jtag->current_state = endstate;
+ return 0;
+}
+
+static int aspeed_jtag_status_get(struct jtag *jtag, u32 *status)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+
+ *status = aspeed_jtag->current_state;
+ return 0;
+}
+
+static irqreturn_t aspeed_jtag_interrupt(s32 this_irq, void *dev_id)
+{
+ struct aspeed_jtag *aspeed_jtag = dev_id;
+ irqreturn_t ret;
+ u32 status;
+
+ status = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_ISR);
+
+ if (status & ASPEED_JTAG_ISR_INT_MASK) {
+ aspeed_jtag_write(aspeed_jtag,
+ (status & ASPEED_JTAG_ISR_INT_MASK) |
+ (status &
+ ASPEED_JTAG_ISR_INT_EN_MASK),
+ ASPEED_JTAG_ISR);
+ aspeed_jtag->flag |= status & ASPEED_JTAG_ISR_INT_MASK;
+ }
+
+ if (aspeed_jtag->flag) {
+ wake_up_interruptible(&aspeed_jtag->jtag_wq);
+ ret = IRQ_HANDLED;
+ } else {
+ dev_err(aspeed_jtag->dev, "irq status:%x\n", status);
+ ret = IRQ_NONE;
+ }
+ return ret;
+}
+
+static irqreturn_t aspeed_jtag_interrupt_hw2(s32 this_irq, void *dev_id)
+{
+ struct aspeed_jtag *aspeed_jtag = dev_id;
+ irqreturn_t ret;
+ u32 status;
+
+ status = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_INTCTRL);
+
+ if (status & ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT) {
+ aspeed_jtag_write(aspeed_jtag,
+ status | ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT,
+ ASPEED_JTAG_INTCTRL);
+ aspeed_jtag->flag |= status & ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT;
+ }
+
+ if (aspeed_jtag->flag) {
+ wake_up_interruptible(&aspeed_jtag->jtag_wq);
+ ret = IRQ_HANDLED;
+ } else {
+ dev_err(aspeed_jtag->dev, "irq status:%x\n", status);
+ ret = IRQ_NONE;
+ }
+ return ret;
+}
+
+static int aspeed_jtag_enable(struct jtag *jtag)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+
+ aspeed_jtag->llops->master_enable(aspeed_jtag);
+ return 0;
+}
+
+static int aspeed_jtag_disable(struct jtag *jtag)
+{
+ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag);
+
+ aspeed_jtag->llops->output_disable(aspeed_jtag);
+ return 0;
+}
+
+static int aspeed_jtag_init(struct platform_device *pdev,
+ struct aspeed_jtag *aspeed_jtag)
+{
+ struct resource *res;
+#ifdef USE_INTERRUPTS
+ int err;
+#endif
+ memset(aspeed_jtag->pad_data_one, ~0,
+ sizeof(aspeed_jtag->pad_data_one));
+ memset(aspeed_jtag->pad_data_zero, 0,
+ sizeof(aspeed_jtag->pad_data_zero));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aspeed_jtag->reg_base = devm_ioremap_resource(aspeed_jtag->dev, res);
+ if (IS_ERR(aspeed_jtag->reg_base))
+		return PTR_ERR(aspeed_jtag->reg_base);
+
+ aspeed_jtag->pclk = devm_clk_get(aspeed_jtag->dev, NULL);
+ if (IS_ERR(aspeed_jtag->pclk)) {
+ dev_err(aspeed_jtag->dev, "devm_clk_get failed\n");
+ return PTR_ERR(aspeed_jtag->pclk);
+ }
+
+#ifdef USE_INTERRUPTS
+ aspeed_jtag->irq = platform_get_irq(pdev, 0);
+ if (aspeed_jtag->irq < 0) {
+ dev_err(aspeed_jtag->dev, "no irq specified\n");
+ return -ENOENT;
+ }
+#endif
+
+ if (clk_prepare_enable(aspeed_jtag->pclk)) {
+ dev_err(aspeed_jtag->dev, "no irq specified\n");
+ return -ENOENT;
+ }
+
+ aspeed_jtag->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(aspeed_jtag->rst)) {
+ dev_err(aspeed_jtag->dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(aspeed_jtag->rst);
+ }
+ reset_control_deassert(aspeed_jtag->rst);
+
+#ifdef USE_INTERRUPTS
+ err = devm_request_irq(aspeed_jtag->dev, aspeed_jtag->irq,
+ aspeed_jtag->llops->jtag_interrupt, 0,
+ "aspeed-jtag", aspeed_jtag);
+ if (err) {
+ dev_err(aspeed_jtag->dev, "unable to get IRQ");
+ clk_disable_unprepare(aspeed_jtag->pclk);
+ return err;
+ }
+#endif
+
+ aspeed_jtag->llops->output_disable(aspeed_jtag);
+
+ aspeed_jtag->flag = 0;
+ aspeed_jtag->mode = 0;
+ init_waitqueue_head(&aspeed_jtag->jtag_wq);
+ return 0;
+}
+
+static int aspeed_jtag_deinit(struct platform_device *pdev,
+ struct aspeed_jtag *aspeed_jtag)
+{
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_ISR);
+ /* Disable clock */
+ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL);
+ reset_control_assert(aspeed_jtag->rst);
+ clk_disable_unprepare(aspeed_jtag->pclk);
+ return 0;
+}
+
+static const struct jtag_ops aspeed_jtag_ops = {
+ .freq_get = aspeed_jtag_freq_get,
+ .freq_set = aspeed_jtag_freq_set,
+ .status_get = aspeed_jtag_status_get,
+ .status_set = aspeed_jtag_status_set,
+ .xfer = aspeed_jtag_xfer,
+ .mode_set = aspeed_jtag_mode_set,
+ .bitbang = aspeed_jtag_bitbang,
+ .enable = aspeed_jtag_enable,
+ .disable = aspeed_jtag_disable
+};
+
+static const struct jtag_ops aspeed_jtag_ops_26xx = {
+#ifdef ASPEED_JTAG_HW_MODE_2_ENABLE
+ .freq_get = aspeed_jtag_freq_get_26xx,
+ .freq_set = aspeed_jtag_freq_set_26xx,
+ .status_get = aspeed_jtag_status_get,
+ .status_set = aspeed_jtag_status_set_26xx,
+#else
+ .freq_get = aspeed_jtag_freq_get,
+ .freq_set = aspeed_jtag_freq_set,
+ .status_get = aspeed_jtag_status_get,
+ .status_set = aspeed_jtag_status_set,
+#endif
+ .xfer = aspeed_jtag_xfer,
+ .mode_set = aspeed_jtag_mode_set,
+ .bitbang = aspeed_jtag_bitbang,
+ .enable = aspeed_jtag_enable,
+ .disable = aspeed_jtag_disable
+};
+
+static const struct jtag_low_level_functions ast25xx_llops = {
+ .master_enable = aspeed_jtag_master,
+ .output_disable = aspeed_jtag_output_disable,
+ .xfer_push_data = aspeed_jtag_xfer_push_data,
+ .xfer_push_data_last = aspeed_jtag_xfer_push_data_last,
+ .xfer_sw = aspeed_jtag_xfer_sw,
+ .xfer_hw = aspeed_jtag_xfer_hw,
+ .xfer_hw_fifo_delay = NULL,
+ .xfer_sw_delay = NULL,
+ .jtag_interrupt = aspeed_jtag_interrupt
+};
+
+static const struct aspeed_jtag_functions ast25xx_functions = {
+ .aspeed_jtag_ops = &aspeed_jtag_ops,
+ .aspeed_jtag_llops = &ast25xx_llops
+};
+
+static const struct jtag_low_level_functions ast26xx_llops = {
+#ifdef ASPEED_JTAG_HW_MODE_2_ENABLE
+ .master_enable = aspeed_jtag_master_26xx,
+ .output_disable = aspeed_jtag_output_disable_26xx,
+ .xfer_push_data = aspeed_jtag_xfer_push_data_26xx,
+ .xfer_push_data_last = aspeed_jtag_xfer_push_data_last_26xx,
+ .xfer_sw = aspeed_jtag_xfer_sw,
+ .xfer_hw = aspeed_jtag_xfer_hw2,
+ .xfer_hw_fifo_delay = aspeed_jtag_xfer_hw_fifo_delay_26xx,
+ .xfer_sw_delay = aspeed_jtag_sw_delay_26xx,
+ .jtag_interrupt = aspeed_jtag_interrupt_hw2
+#else
+ .master_enable = aspeed_jtag_master,
+ .output_disable = aspeed_jtag_output_disable,
+ .xfer_push_data = aspeed_jtag_xfer_push_data_26xx,
+ .xfer_push_data_last = aspeed_jtag_xfer_push_data_last_26xx,
+ .xfer_sw = aspeed_jtag_xfer_sw,
+ .xfer_hw = aspeed_jtag_xfer_hw,
+ .xfer_hw_fifo_delay = aspeed_jtag_xfer_hw_fifo_delay_26xx,
+ .xfer_sw_delay = aspeed_jtag_sw_delay_26xx,
+ .jtag_interrupt = aspeed_jtag_interrupt
+#endif
+};
+
+static const struct aspeed_jtag_functions ast26xx_functions = {
+ .aspeed_jtag_ops = &aspeed_jtag_ops_26xx,
+ .aspeed_jtag_llops = &ast26xx_llops
+};
+
+static const struct of_device_id aspeed_jtag_of_match[] = {
+ { .compatible = "aspeed,ast2400-jtag", .data = &ast25xx_functions },
+ { .compatible = "aspeed,ast2500-jtag", .data = &ast25xx_functions },
+ { .compatible = "aspeed,ast2600-jtag", .data = &ast26xx_functions },
+ {}
+};
+
+static int aspeed_jtag_probe(struct platform_device *pdev)
+{
+ struct aspeed_jtag *aspeed_jtag;
+ struct jtag *jtag;
+ const struct of_device_id *match;
+ const struct aspeed_jtag_functions *jtag_functions;
+ int err;
+
+ match = of_match_node(aspeed_jtag_of_match, pdev->dev.of_node);
+ if (!match)
+ return -ENODEV;
+ jtag_functions = match->data;
+
+ jtag = jtag_alloc(&pdev->dev, sizeof(*aspeed_jtag),
+ jtag_functions->aspeed_jtag_ops);
+ if (!jtag)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, jtag);
+ aspeed_jtag = jtag_priv(jtag);
+ aspeed_jtag->dev = &pdev->dev;
+
+ aspeed_jtag->llops = jtag_functions->aspeed_jtag_llops;
+
+	/* Initialize device */
+ err = aspeed_jtag_init(pdev, aspeed_jtag);
+ if (err)
+ goto err_jtag_init;
+
+	/* Initialize JTAG core structure */
+ err = devm_jtag_register(aspeed_jtag->dev, jtag);
+ if (err)
+ goto err_jtag_register;
+
+ return 0;
+
+err_jtag_register:
+ aspeed_jtag_deinit(pdev, aspeed_jtag);
+err_jtag_init:
+ jtag_free(jtag);
+ return err;
+}
+
+static int aspeed_jtag_remove(struct platform_device *pdev)
+{
+ struct jtag *jtag = platform_get_drvdata(pdev);
+
+ aspeed_jtag_deinit(pdev, jtag_priv(jtag));
+ return 0;
+}
+
+static struct platform_driver aspeed_jtag_driver = {
+ .probe = aspeed_jtag_probe,
+ .remove = aspeed_jtag_remove,
+ .driver = {
+ .name = ASPEED_JTAG_NAME,
+ .of_match_table = aspeed_jtag_of_match,
+ },
+};
+module_platform_driver(aspeed_jtag_driver);
+
+MODULE_AUTHOR("Oleksandr Shamray <oleksandrs@mellanox.com>");
+MODULE_DESCRIPTION("ASPEED JTAG driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/jtag/jtag.c b/drivers/jtag/jtag.c
new file mode 100644
index 000000000000..d4f0250d56be
--- /dev/null
+++ b/drivers/jtag/jtag.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+// Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com>
+// Copyright (c) 2019 Intel Corporation
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/jtag.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/jtag.h>
+
+struct jtag {
+ struct miscdevice miscdev;
+ const struct jtag_ops *ops;
+ int id;
+ unsigned long priv[0];
+};
+
+static DEFINE_IDA(jtag_ida);
+
+void *jtag_priv(struct jtag *jtag)
+{
+ return jtag->priv;
+}
+EXPORT_SYMBOL_GPL(jtag_priv);
+
+static long jtag_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct jtag *jtag = file->private_data;
+ struct jtag_tap_state tapstate;
+ struct jtag_xfer xfer;
+ struct bitbang_packet bitbang;
+ struct tck_bitbang *bitbang_data;
+ struct jtag_mode mode;
+ u8 *xfer_data;
+ u32 data_size;
+ u32 value;
+ int err;
+
+ if (!arg)
+ return -EINVAL;
+
+ switch (cmd) {
+ case JTAG_GIOCFREQ:
+ if (!jtag->ops->freq_get)
+ return -EOPNOTSUPP;
+
+ err = jtag->ops->freq_get(jtag, &value);
+ if (err)
+ break;
+
+ if (put_user(value, (__u32 __user *)arg))
+ err = -EFAULT;
+ break;
+
+ case JTAG_SIOCFREQ:
+ if (!jtag->ops->freq_set)
+ return -EOPNOTSUPP;
+
+ if (get_user(value, (__u32 __user *)arg))
+ return -EFAULT;
+ if (value == 0)
+ return -EINVAL;
+
+ err = jtag->ops->freq_set(jtag, value);
+ break;
+
+ case JTAG_SIOCSTATE:
+ if (copy_from_user(&tapstate, (const void __user *)arg,
+ sizeof(struct jtag_tap_state)))
+ return -EFAULT;
+
+ if (tapstate.from > JTAG_STATE_CURRENT)
+ return -EINVAL;
+
+ if (tapstate.endstate > JTAG_STATE_CURRENT)
+ return -EINVAL;
+
+ if (tapstate.reset > JTAG_FORCE_RESET)
+ return -EINVAL;
+
+ err = jtag->ops->status_set(jtag, &tapstate);
+ break;
+
+ case JTAG_IOCXFER:
+ {
+ u8 ubit_mask = GENMASK(7, 0);
+ u8 remaining_bits = 0x0;
+
+ if (copy_from_user(&xfer, (const void __user *)arg,
+ sizeof(struct jtag_xfer)))
+ return -EFAULT;
+
+ if (xfer.length >= JTAG_MAX_XFER_DATA_LEN)
+ return -EINVAL;
+
+ if (xfer.type > JTAG_SDR_XFER)
+ return -EINVAL;
+
+ if (xfer.direction > JTAG_READ_WRITE_XFER)
+ return -EINVAL;
+
+ if (xfer.from > JTAG_STATE_CURRENT)
+ return -EINVAL;
+
+ if (xfer.endstate > JTAG_STATE_CURRENT)
+ return -EINVAL;
+
+		data_size = DIV_ROUND_UP(xfer.length, BITS_PER_BYTE);
+		xfer_data = memdup_user(u64_to_user_ptr(xfer.tdio), data_size);
+		/* memdup_user() may return an error pointer; check before use */
+		if (IS_ERR(xfer_data))
+			return -EFAULT;
+
+		/* Save unused remaining bits in this transfer */
+		if ((xfer.length % BITS_PER_BYTE)) {
+			ubit_mask = GENMASK((xfer.length % BITS_PER_BYTE) - 1,
+					    0);
+			remaining_bits = xfer_data[data_size - 1] & ~ubit_mask;
+		}
+
+ err = jtag->ops->xfer(jtag, &xfer, xfer_data);
+ if (err) {
+ kfree(xfer_data);
+ return err;
+ }
+
+ /* Restore unused remaining bits in this transfer */
+ xfer_data[data_size - 1] = (xfer_data[data_size - 1]
+ & ubit_mask) | remaining_bits;
+
+ err = copy_to_user(u64_to_user_ptr(xfer.tdio),
+ (void *)xfer_data, data_size);
+ kfree(xfer_data);
+ if (err)
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)arg, (void *)&xfer,
+ sizeof(struct jtag_xfer)))
+ return -EFAULT;
+ break;
+ }
+
+ case JTAG_GIOCSTATUS:
+ err = jtag->ops->status_get(jtag, &value);
+ if (err)
+ break;
+
+ err = put_user(value, (__u32 __user *)arg);
+ break;
+ case JTAG_IOCBITBANG:
+ if (copy_from_user(&bitbang, (const void __user *)arg,
+ sizeof(struct bitbang_packet)))
+ return -EFAULT;
+
+ if (bitbang.length >= JTAG_MAX_XFER_DATA_LEN)
+ return -EINVAL;
+
+ data_size = bitbang.length * sizeof(struct tck_bitbang);
+ bitbang_data = memdup_user((void __user *)bitbang.data,
+ data_size);
+ if (IS_ERR(bitbang_data))
+ return -EFAULT;
+
+ err = jtag->ops->bitbang(jtag, &bitbang, bitbang_data);
+ if (err) {
+ kfree(bitbang_data);
+ return err;
+ }
+ err = copy_to_user((void __user *)bitbang.data,
+ (void *)bitbang_data, data_size);
+ kfree(bitbang_data);
+ if (err)
+ return -EFAULT;
+ break;
+ case JTAG_SIOCMODE:
+ if (!jtag->ops->mode_set)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&mode, (const void __user *)arg,
+ sizeof(struct jtag_mode)))
+ return -EFAULT;
+
+ err = jtag->ops->mode_set(jtag, &mode);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return err;
+}
+
+static int jtag_open(struct inode *inode, struct file *file)
+{
+ struct jtag *jtag = container_of(file->private_data,
+ struct jtag,
+ miscdev);
+
+ file->private_data = jtag;
+ if (jtag->ops->enable(jtag))
+ return -EBUSY;
+ return nonseekable_open(inode, file);
+}
+
+static int jtag_release(struct inode *inode, struct file *file)
+{
+ struct jtag *jtag = file->private_data;
+
+ if (jtag->ops->disable(jtag))
+ return -EBUSY;
+ return 0;
+}
+
+static const struct file_operations jtag_fops = {
+ .owner = THIS_MODULE,
+ .open = jtag_open,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = jtag_ioctl,
+ .release = jtag_release,
+};
+
+struct jtag *jtag_alloc(struct device *host, size_t priv_size,
+ const struct jtag_ops *ops)
+{
+ struct jtag *jtag;
+
+ if (!host)
+ return NULL;
+
+ if (!ops)
+ return NULL;
+
+ if (!ops->status_set || !ops->status_get || !ops->xfer)
+ return NULL;
+
+ jtag = kzalloc(sizeof(*jtag) + priv_size, GFP_KERNEL);
+ if (!jtag)
+ return NULL;
+
+ jtag->ops = ops;
+ jtag->miscdev.parent = host;
+
+ return jtag;
+}
+EXPORT_SYMBOL_GPL(jtag_alloc);
+
+void jtag_free(struct jtag *jtag)
+{
+ kfree(jtag);
+}
+EXPORT_SYMBOL_GPL(jtag_free);
+
+static int jtag_register(struct jtag *jtag)
+{
+ struct device *dev = jtag->miscdev.parent;
+ int err;
+ int id;
+
+ if (!dev)
+ return -ENODEV;
+
+ id = ida_simple_get(&jtag_ida, 0, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ jtag->id = id;
+
+ jtag->miscdev.fops = &jtag_fops;
+ jtag->miscdev.minor = MISC_DYNAMIC_MINOR;
+ jtag->miscdev.name = kasprintf(GFP_KERNEL, "jtag%d", id);
+ if (!jtag->miscdev.name) {
+ err = -ENOMEM;
+ goto err_jtag_alloc;
+ }
+
+ err = misc_register(&jtag->miscdev);
+ if (err) {
+ dev_err(jtag->miscdev.parent, "Unable to register device\n");
+ goto err_jtag_name;
+ }
+ return 0;
+
+err_jtag_name:
+ kfree(jtag->miscdev.name);
+err_jtag_alloc:
+ ida_simple_remove(&jtag_ida, id);
+ return err;
+}
+
+static void jtag_unregister(struct jtag *jtag)
+{
+ misc_deregister(&jtag->miscdev);
+ kfree(jtag->miscdev.name);
+ ida_simple_remove(&jtag_ida, jtag->id);
+}
+
+static void devm_jtag_unregister(struct device *dev, void *res)
+{
+ jtag_unregister(*(struct jtag **)res);
+}
+
+int devm_jtag_register(struct device *dev, struct jtag *jtag)
+{
+ struct jtag **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_jtag_unregister, sizeof(struct jtag *),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = jtag_register(jtag);
+ if (!ret) {
+ *ptr = jtag;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_jtag_register);
+
+static void __exit jtag_exit(void)
+{
+ ida_destroy(&jtag_ida);
+}
+
+module_exit(jtag_exit);
+
+MODULE_AUTHOR("Oleksandr Shamray <oleksandrs@mellanox.com>");
+MODULE_DESCRIPTION("Generic jtag support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index d2f345245538..ac76f5cf7ceb 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -676,6 +676,23 @@ config MFD_INTEL_LPSS_PCI
I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
PCH) in PCI mode.
+config MFD_INTEL_PECI_CLIENT
+ tristate "Intel PECI client"
+ depends on (PECI || COMPILE_TEST)
+ select MFD_CORE
+ help
+ If you say yes to this option, support will be included for the
+ Intel PECI (Platform Environment Control Interface) client. PECI is a
+ one-wire bus interface that provides a communication channel from PECI
+ clients in Intel processors and chipset components to external
+ monitoring or control devices.
+
+ Additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ This driver can also be built as a module. If so, the module
+ will be called intel-peci-client.
+
config MFD_INTEL_PMC_BXT
tristate "Intel PMC Driver for Broxton"
depends on X86
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 2ba6646e874c..b940be7d8aea 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -211,6 +211,7 @@ obj-$(CONFIG_MFD_ATMEL_SMC) += atmel-smc.o
obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
+obj-$(CONFIG_MFD_INTEL_PECI_CLIENT) += intel-peci-client.o
obj-$(CONFIG_MFD_INTEL_PMC_BXT) += intel_pmc_bxt.o
obj-$(CONFIG_MFD_INTEL_PMT) += intel_pmt.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
diff --git a/drivers/mfd/intel-peci-client.c b/drivers/mfd/intel-peci-client.c
new file mode 100644
index 000000000000..3eb2c59a2424
--- /dev/null
+++ b/drivers/mfd/intel-peci-client.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel-peci-client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/peci.h>
+
+#define CPU_ID_MODEL_MASK GENMASK(7, 4)
+#define CPU_ID_FAMILY_MASK GENMASK(11, 8)
+#define CPU_ID_EXT_MODEL_MASK GENMASK(19, 16)
+#define CPU_ID_EXT_FAMILY_MASK GENMASK(27, 20)
+
+#define LOWER_NIBBLE_MASK GENMASK(3, 0)
+#define UPPER_NIBBLE_MASK GENMASK(7, 4)
+#define LOWER_BYTE_MASK GENMASK(7, 0)
+#define UPPER_BYTE_MASK GENMASK(16, 8)
+
+static struct mfd_cell peci_functions[] = {
+ { .name = "peci-cputemp", },
+ { .name = "peci-dimmtemp", },
+ { .name = "peci-cpupower", },
+ { .name = "peci-dimmpower", },
+};
+
+static const struct cpu_gen_info cpu_gen_info_table[] = {
+ { /* Haswell Xeon */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_HASWELL_X,
+ .core_mask_bits = CORE_MASK_BITS_ON_HSX,
+ .chan_rank_max = CHAN_RANK_MAX_ON_HSX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_HSX },
+ { /* Broadwell Xeon */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_BROADWELL_X,
+ .core_mask_bits = CORE_MASK_BITS_ON_BDX,
+ .chan_rank_max = CHAN_RANK_MAX_ON_BDX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_BDX },
+ { /* Skylake Xeon */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_SKYLAKE_X,
+ .core_mask_bits = CORE_MASK_BITS_ON_SKX,
+ .chan_rank_max = CHAN_RANK_MAX_ON_SKX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_SKX },
+ { /* Skylake Xeon D */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_SKYLAKE_XD,
+ .core_mask_bits = CORE_MASK_BITS_ON_SKXD,
+ .chan_rank_max = CHAN_RANK_MAX_ON_SKXD,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_SKXD },
+ { /* Icelake Xeon */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_ICELAKE_X,
+ .core_mask_bits = CORE_MASK_BITS_ON_ICX,
+ .chan_rank_max = CHAN_RANK_MAX_ON_ICX,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_ICX },
+ { /* Icelake Xeon D */
+ .family = INTEL_FAM6,
+ .model = INTEL_FAM6_ICELAKE_XD,
+ .core_mask_bits = CORE_MASK_BITS_ON_ICXD,
+ .chan_rank_max = CHAN_RANK_MAX_ON_ICXD,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_ICXD },
+};
+
+static int peci_client_get_cpu_gen_info(struct peci_client_manager *priv)
+{
+ struct device *dev = &priv->client->dev;
+ u32 cpu_id;
+ u16 family;
+ u8 model;
+ int ret;
+ int i;
+
+ ret = peci_get_cpu_id(priv->client->adapter, priv->client->addr,
+ &cpu_id);
+ if (ret)
+ return ret;
+
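+	/*
+	 * Reassemble the family and model numbers from the packed CPU ID:
+	 * family = (extended family << 8) | base family,
+	 * model  = (extended model << 4) | base model.
+	 */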
+ family = FIELD_PREP(LOWER_BYTE_MASK,
+ FIELD_GET(CPU_ID_FAMILY_MASK, cpu_id)) |
+ FIELD_PREP(UPPER_BYTE_MASK,
+ FIELD_GET(CPU_ID_EXT_FAMILY_MASK, cpu_id));
+ model = FIELD_PREP(LOWER_NIBBLE_MASK,
+ FIELD_GET(CPU_ID_MODEL_MASK, cpu_id)) |
+ FIELD_PREP(UPPER_NIBBLE_MASK,
+ FIELD_GET(CPU_ID_EXT_MODEL_MASK, cpu_id));
+
+ for (i = 0; i < ARRAY_SIZE(cpu_gen_info_table); i++) {
+ const struct cpu_gen_info *cpu_info = &cpu_gen_info_table[i];
+
+ if (family == cpu_info->family && model == cpu_info->model) {
+ priv->gen_info = cpu_info;
+ break;
+ }
+ }
+
+ if (!priv->gen_info) {
+ dev_err(dev, "Can't support this CPU: 0x%x\n", cpu_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int peci_client_probe(struct peci_client *client)
+{
+ struct device *dev = &client->dev;
+ struct peci_client_manager *priv;
+ int device_id;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->client = client;
+
+ ret = peci_client_get_cpu_gen_info(priv);
+ if (ret)
+ return ret;
+
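+	/*
+	 * Derive a unique id for the MFD cells from the PECI bus number and
+	 * the client's offset from PECI_BASE_ADDR, so child devices of
+	 * different CPU sockets do not collide.
+	 */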
+ device_id = (client->adapter->nr << 4) | (client->addr - PECI_BASE_ADDR);
+
+ ret = devm_mfd_add_devices(dev, device_id, peci_functions,
+ ARRAY_SIZE(peci_functions), NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register child devices: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id peci_client_of_table[] = {
+ { .compatible = "intel,peci-client" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, peci_client_of_table);
+#endif
+
+static const struct peci_device_id peci_client_ids[] = {
+ { .name = "peci-client" },
+ { }
+};
+MODULE_DEVICE_TABLE(peci, peci_client_ids);
+
+static struct peci_driver peci_client_driver = {
+ .probe = peci_client_probe,
+ .id_table = peci_client_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(peci_client_of_table),
+ },
+};
+module_peci_driver(peci_client_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI client driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 191fdb87c424..0be2e33fd000 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -83,6 +83,10 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
if (ret)
reg_io_width = 4;
+ if (of_device_is_compatible(np, "aspeed,ast2500-scu") ||
+ of_device_is_compatible(np, "aspeed,ast2600-scu"))
+ syscon_config.use_raw_spinlock = true;
+
ret = of_hwspin_lock_get_id(np, 0);
if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
syscon_config.use_hwlock = true;
diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
index 74fa46439246..367d97abb3f6 100644
--- a/drivers/mtd/spi-nor/controllers/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -21,6 +21,11 @@
#include <linux/sysfs.h>
#define DEVICE_NAME "aspeed-smc"
+#define AST2600A0 0x05000303
+#define AST2600A0_MAX_FREQ 50000000
+#define AST2600A0_SAFE_FREQ 40000000
+#define AST_MAX_FREQ 100000000
+#define AST2600_REVISION_ID_SCU 0x1e6e2004
/*
* The driver only support SPI flash
@@ -543,6 +548,10 @@ static int aspeed_smc_get_io_mode(struct aspeed_smc_chip *chip)
return CONTROL_IO_DUAL_DATA;
case SNOR_PROTO_1_2_2:
return CONTROL_IO_DUAL_ADDR_DATA;
+ case SNOR_PROTO_1_1_4:
+ return CONTROL_IO_QUAD_DATA;
+ case SNOR_PROTO_1_4_4:
+ return CONTROL_IO_QUAD_ADDR_DATA;
default:
dev_err(chip->nor.dev, "unsupported SPI read mode\n");
return -EINVAL;
@@ -574,7 +583,7 @@ static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from,
aspeed_smc_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy));
/* Set IO mode only for data */
- if (io_mode == CONTROL_IO_DUAL_DATA)
+ if (io_mode == CONTROL_IO_DUAL_DATA || io_mode == CONTROL_IO_QUAD_DATA)
aspeed_smc_set_io_mode(chip, io_mode);
aspeed_smc_read_from_ahb(read_buf, chip->ahb_base, len);
@@ -1235,6 +1244,25 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
return 0;
}
+static void aspeed_allowed_max_freq(struct aspeed_smc_chip *chip)
+{
+	void __iomem *scu_ast_revision_id = ioremap(AST2600_REVISION_ID_SCU, 4);
+	u32 rev_id;
+
+	if (!scu_ast_revision_id)
+		return;
+
+	rev_id = readl(scu_ast_revision_id);
+	iounmap(scu_ast_revision_id);
+
+	/*
+	 * Cap the SPI clock at 50 MHz on AST2600-A0 due to an FWSPICLK signal
+	 * quality issue.
+	 */
+	if (rev_id == AST2600A0 && chip->clk_rate > AST2600A0_MAX_FREQ)
+		chip->clk_rate = AST2600A0_MAX_FREQ;
+}
+
+static u32 get_hwcaps(unsigned int tx_width)
+{
+	if (tx_width == 4)
+		return SNOR_HWCAPS_READ_1_1_4;
+
+	return SNOR_HWCAPS_READ_1_1_2;
+}
+
static const struct spi_nor_controller_ops aspeed_smc_controller_ops = {
.prepare = aspeed_smc_prep,
.unprepare = aspeed_smc_unprep,
@@ -1247,17 +1275,13 @@ static const struct spi_nor_controller_ops aspeed_smc_controller_ops = {
static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
struct device_node *np, struct resource *r)
{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_READ_1_1_2 |
- SNOR_HWCAPS_PP,
- };
+ struct spi_nor_hwcaps hwcaps;
const struct aspeed_smc_info *info = controller->info;
struct device *dev = controller->dev;
struct device_node *child;
unsigned int cs;
int ret = -ENODEV;
+ unsigned int spi_tx_width;
bool found_one = false;
for_each_available_child_of_node(np, child) {
@@ -1299,9 +1323,20 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller,
&chip->clk_rate)) {
chip->clk_rate = ASPEED_SPI_DEFAULT_FREQ;
}
+ aspeed_allowed_max_freq(chip);
dev_info(dev, "Using %d MHz SPI frequency\n",
chip->clk_rate / 1000000);
+		if (of_property_read_u32(child, "spi-tx-bus-width",
+					 &spi_tx_width)) {
+			spi_tx_width = 2;
+		}
+		dev_info(dev, "tx width: %u\n", spi_tx_width);
+
+ hwcaps.mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ get_hwcaps(spi_tx_width) |
+ SNOR_HWCAPS_PP;
chip->controller = controller;
chip->ctl = controller->regs + info->ctl0 + cs * 4;
chip->cs = cs;
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index c224e59820a1..5222af071b3f 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -172,8 +172,13 @@ static const struct flash_info st_parts[] = {
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+	/*
+	 * Quad I/O support is disabled on n25q00 for the time being because of
+	 * a clock issue seen with the 'Micron 8UA15 - rw182 (128MB)' part when
+	 * Quad I/O mode is enabled. As that part ships by default on these
+	 * platforms, keep it unsupported until all chips are replaced with the
+	 * newer model. Other chips sold under the same n25q00 name but with a
+	 * different part number are not affected.
+	 */
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SECT_4K | USE_FSR |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
NO_CHIP_ERASE) },
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c96d4bf4d5e3..33e119833b20 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1255,10 +1255,30 @@ static int ftgmac100_set_pauseparam(struct net_device *netdev,
return 0;
}
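+/*
+ * When the interface runs over NC-SI there is no PHY attached, so report the
+ * cached speed/duplex settings instead of querying phylib.
+ */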
+static int ftgmac100_ethtool_get_link_ksettings(struct net_device *netdev,
+						struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = netdev->phydev;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ int retval = 0;
+
+ if (phydev) {
+ phy_ethtool_ksettings_get(phydev, cmd);
+ } else if (priv->use_ncsi) {
+ cmd->base.speed = priv->cur_speed;
+ cmd->base.duplex = priv->cur_duplex;
+ cmd->base.autoneg = 0;
+ } else {
+ retval = -ENODEV;
+ }
+
+ return retval;
+}
+
static const struct ethtool_ops ftgmac100_ethtool_ops = {
.get_drvinfo = ftgmac100_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .get_link_ksettings = ftgmac100_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.nway_reset = phy_ethtool_nway_reset,
.get_ringparam = ftgmac100_get_ringparam,
diff --git a/drivers/peci/Kconfig b/drivers/peci/Kconfig
new file mode 100644
index 000000000000..a64fed7bb367
--- /dev/null
+++ b/drivers/peci/Kconfig
@@ -0,0 +1,37 @@
+#
+# Platform Environment Control Interface (PECI) subsystem configuration
+#
+
+menu "PECI support"
+
+config PECI
+ tristate "PECI support"
+ select CRC8
+ help
+ The Platform Environment Control Interface (PECI) is a one-wire bus
+ interface that provides a communication channel from Intel processors
+ and chipset components to external monitoring or control devices.
+
+ If you want PECI support, you should say Y here and also to the
+ specific driver for your bus adapter(s) below.
+
+ This support is also available as a module. If so, the module
+ will be called peci-core.
+
+if PECI
+
+config PECI_CHARDEV
+ tristate "PECI device interface"
+ help
+ Say Y here to use peci-* device files, usually found in the /dev
+ directory on your system. They make it possible to have user-space
+ programs use the PECI bus.
+
+ This support is also available as a module. If so, the module
+ will be called peci-dev.
+
+source "drivers/peci/busses/Kconfig"
+
+endif # PECI
+
+endmenu
diff --git a/drivers/peci/Makefile b/drivers/peci/Makefile
new file mode 100644
index 000000000000..da8b0a33fa42
--- /dev/null
+++ b/drivers/peci/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PECI core drivers.
+#
+
+# Core functionality
+obj-$(CONFIG_PECI) += peci-core.o
+obj-$(CONFIG_PECI_CHARDEV) += peci-dev.o
+
+# Hardware specific bus drivers
+obj-y += busses/
diff --git a/drivers/peci/busses/Kconfig b/drivers/peci/busses/Kconfig
new file mode 100644
index 000000000000..20a1a7472d96
--- /dev/null
+++ b/drivers/peci/busses/Kconfig
@@ -0,0 +1,47 @@
+#
+# PECI hardware bus configuration
+#
+
+menu "PECI Hardware Bus support"
+
+config PECI_ASPEED
+ tristate "ASPEED PECI support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF
+ depends on HAS_IOMEM
+ depends on PECI
+ help
+ Say Y here if you want support for the Platform Environment Control
+ Interface (PECI) bus adapter driver on the ASPEED SoCs.
+
+ This support is also available as a module. If so, the module
+ will be called peci-aspeed.
+
+config PECI_NPCM
+ tristate "Nuvoton NPCM PECI support"
+ select REGMAP_MMIO
+ depends on OF
+ depends on HAS_IOMEM
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on PECI
+ help
+ Say Y here if you want support for the Platform Environment Control
+ Interface (PECI) bus adapter driver on the Nuvoton NPCM SoCs.
+
+ This support is also available as a module. If so, the module
+ will be called peci-npcm.
+
+config PECI_MCTP
+ tristate "PECI over MCTP support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on PECI
+	depends on ASPEED_MCTP
+	help
+ Say Y here if you want support for the Platform Environment Control
+ Interface (PECI) over MCTP bus adapter driver.
+
+ This support is also available as a module. If so, the module
+ will be called peci-mctp.
+
+endmenu
diff --git a/drivers/peci/busses/Makefile b/drivers/peci/busses/Makefile
new file mode 100644
index 000000000000..ebf3fda9bedc
--- /dev/null
+++ b/drivers/peci/busses/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PECI hardware bus drivers.
+#
+
+obj-$(CONFIG_PECI_ASPEED) += peci-aspeed.o
+obj-$(CONFIG_PECI_NPCM) += peci-npcm.o
+obj-$(CONFIG_PECI_MCTP) += peci-mctp.o
diff --git a/drivers/peci/busses/peci-aspeed.c b/drivers/peci/busses/peci-aspeed.c
new file mode 100644
index 000000000000..224a3ae8492c
--- /dev/null
+++ b/drivers/peci/busses/peci-aspeed.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012-2017 ASPEED Technology Inc.
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/peci.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* ASPEED PECI Registers */
+/* Control Register */
+#define ASPEED_PECI_CTRL 0x00
+#define ASPEED_PECI_CTRL_SAMPLING_MASK GENMASK(19, 16)
+#define ASPEED_PECI_CTRL_READ_MODE_MASK GENMASK(13, 12)
+#define ASPEED_PECI_CTRL_READ_MODE_COUNT BIT(12)
+#define ASPEED_PECI_CTRL_READ_MODE_DBG BIT(13)
+#define ASPEED_PECI_CTRL_CLK_SOURCE_MASK BIT(11)
+#define ASPEED_PECI_CTRL_CLK_DIV_MASK GENMASK(10, 8)
+#define ASPEED_PECI_CTRL_INVERT_OUT BIT(7)
+#define ASPEED_PECI_CTRL_INVERT_IN BIT(6)
+#define ASPEED_PECI_CTRL_BUS_CONTENT_EN BIT(5)
+#define ASPEED_PECI_CTRL_PECI_EN BIT(4)
+#define ASPEED_PECI_CTRL_PECI_CLK_EN BIT(0)
+
+/* Timing Negotiation Register */
+#define ASPEED_PECI_TIMING_NEGOTIATION 0x04
+#define ASPEED_PECI_TIMING_MESSAGE_MASK GENMASK(15, 8)
+#define ASPEED_PECI_TIMING_ADDRESS_MASK GENMASK(7, 0)
+
+/* Command Register */
+#define ASPEED_PECI_CMD 0x08
+#define ASPEED_PECI_CMD_PIN_MON BIT(31)
+#define ASPEED_PECI_CMD_STS_MASK GENMASK(27, 24)
+#define ASPEED_PECI_CMD_STS_ADDR_T_NEGO 0x3
+#define ASPEED_PECI_CMD_IDLE_MASK \
+ (ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MON)
+#define ASPEED_PECI_CMD_FIRE BIT(0)
+
+/* Read/Write Length Register */
+#define ASPEED_PECI_RW_LENGTH 0x0c
+#define ASPEED_PECI_AW_FCS_EN BIT(31)
+#define ASPEED_PECI_READ_LEN_MASK GENMASK(23, 16)
+#define ASPEED_PECI_WRITE_LEN_MASK GENMASK(15, 8)
+#define ASPEED_PECI_TARGET_ADDR_MASK		GENMASK(7, 0)
+
+/* Expected FCS Data Register */
+#define ASPEED_PECI_EXP_FCS 0x10
+#define ASPEED_PECI_EXP_READ_FCS_MASK GENMASK(23, 16)
+#define ASPEED_PECI_EXP_AW_FCS_AUTO_MASK GENMASK(15, 8)
+#define ASPEED_PECI_EXP_WRITE_FCS_MASK GENMASK(7, 0)
+
+/* Captured FCS Data Register */
+#define ASPEED_PECI_CAP_FCS 0x14
+#define ASPEED_PECI_CAP_READ_FCS_MASK GENMASK(23, 16)
+#define ASPEED_PECI_CAP_WRITE_FCS_MASK GENMASK(7, 0)
+
+/* Interrupt Register */
+#define ASPEED_PECI_INT_CTRL 0x18
+#define ASPEED_PECI_TIMING_NEGO_SEL_MASK GENMASK(31, 30)
+#define ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO 0
+#define ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO 1
+#define ASPEED_PECI_MESSAGE_NEGO 2
+#define ASPEED_PECI_INT_MASK GENMASK(4, 0)
+#define ASPEED_PECI_INT_BUS_TIMEOUT BIT(4)
+#define ASPEED_PECI_INT_BUS_CONNECT BIT(3)
+#define ASPEED_PECI_INT_W_FCS_BAD BIT(2)
+#define ASPEED_PECI_INT_W_FCS_ABORT BIT(1)
+#define ASPEED_PECI_INT_CMD_DONE BIT(0)
+
+/* Interrupt Status Register */
+#define ASPEED_PECI_INT_STS 0x1c
+#define ASPEED_PECI_INT_TIMING_RESULT_MASK GENMASK(29, 16)
+	/* bits[4..0]: same bit fields as in the 'Interrupt Register' */
+
+/* Rx/Tx Data Buffer Registers */
+#define ASPEED_PECI_W_DATA0 0x20
+#define ASPEED_PECI_W_DATA1 0x24
+#define ASPEED_PECI_W_DATA2 0x28
+#define ASPEED_PECI_W_DATA3 0x2c
+#define ASPEED_PECI_R_DATA0 0x30
+#define ASPEED_PECI_R_DATA1 0x34
+#define ASPEED_PECI_R_DATA2 0x38
+#define ASPEED_PECI_R_DATA3 0x3c
+#define ASPEED_PECI_W_DATA4 0x40
+#define ASPEED_PECI_W_DATA5 0x44
+#define ASPEED_PECI_W_DATA6 0x48
+#define ASPEED_PECI_W_DATA7 0x4c
+#define ASPEED_PECI_R_DATA4 0x50
+#define ASPEED_PECI_R_DATA5 0x54
+#define ASPEED_PECI_R_DATA6 0x58
+#define ASPEED_PECI_R_DATA7 0x5c
+#define ASPEED_PECI_DATA_BUF_SIZE_MAX 32
+
+/* Timing Negotiation */
+#define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT 8
+#define ASPEED_PECI_RD_SAMPLING_POINT_MAX 15
+#define ASPEED_PECI_CLK_DIV_DEFAULT 0
+#define ASPEED_PECI_CLK_DIV_MAX 7
+#define ASPEED_PECI_MSG_TIMING_DEFAULT 1
+#define ASPEED_PECI_MSG_TIMING_MAX 255
+#define ASPEED_PECI_ADDR_TIMING_DEFAULT 1
+#define ASPEED_PECI_ADDR_TIMING_MAX 255
+
+/* Timeout */
+#define ASPEED_PECI_IDLE_CHECK_TIMEOUT_USEC 50000
+#define ASPEED_PECI_IDLE_CHECK_INTERVAL_USEC 10000
+#define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
+#define ASPEED_PECI_CMD_TIMEOUT_MS_MAX 60000
+
+struct aspeed_peci {
+ struct peci_adapter *adapter;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ int irq;
+ spinlock_t lock; /* to sync completion status handling */
+ struct completion xfer_complete;
+ u32 status;
+ u32 cmd_timeout_ms;
+ u32 msg_timing;
+ u32 addr_timing;
+ u32 rd_sampling_point;
+ u32 clk_div_val;
+};
+
+static void aspeed_peci_init_regs(struct aspeed_peci *priv)
+{
+ writel(FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK,
+ ASPEED_PECI_CLK_DIV_DEFAULT) |
+ ASPEED_PECI_CTRL_PECI_CLK_EN, priv->base + ASPEED_PECI_CTRL);
+
+ /*
+ * Timing negotiation period setting.
+ * The unit of the programmed value is 4 times of PECI clock period.
+ */
+ writel(FIELD_PREP(ASPEED_PECI_TIMING_MESSAGE_MASK, priv->msg_timing) |
+ FIELD_PREP(ASPEED_PECI_TIMING_ADDRESS_MASK, priv->addr_timing),
+ priv->base + ASPEED_PECI_TIMING_NEGOTIATION);
+
+ /* Clear interrupts */
+ writel(readl(priv->base + ASPEED_PECI_INT_STS) | ASPEED_PECI_INT_MASK,
+ priv->base + ASPEED_PECI_INT_STS);
+
+ /* Set timing negotiation mode and enable interrupts */
+ writel(FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK,
+ ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO) |
+ ASPEED_PECI_INT_MASK, priv->base + ASPEED_PECI_INT_CTRL);
+
+ /* Read sampling point and clock speed setting */
+ writel(FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK, priv->rd_sampling_point) |
+ FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK, priv->clk_div_val) |
+ ASPEED_PECI_CTRL_PECI_EN | ASPEED_PECI_CTRL_PECI_CLK_EN,
+ priv->base + ASPEED_PECI_CTRL);
+}
+
+static inline int aspeed_peci_check_idle(struct aspeed_peci *priv)
+{
+ u32 cmd_sts = readl(priv->base + ASPEED_PECI_CMD);
+ int ret;
+
+	/*
+	 * Under normal circumstances we expect to be idle here. If earlier
+	 * errors or timeouts have left the hardware in a non-idle state, reset
+	 * and reinitialize it to avoid a potential controller hang.
+	 */
+ if (FIELD_GET(ASPEED_PECI_CMD_STS_MASK, cmd_sts)) {
+ ret = reset_control_assert(priv->rst);
+ if (ret) {
+ dev_err(priv->dev, "cannot assert reset control\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret) {
+ dev_err(priv->dev, "cannot deassert reset control\n");
+ return ret;
+ }
+
+ aspeed_peci_init_regs(priv);
+ }
+
+ return readl_poll_timeout(priv->base + ASPEED_PECI_CMD,
+ cmd_sts,
+ !(cmd_sts & ASPEED_PECI_CMD_IDLE_MASK),
+ ASPEED_PECI_IDLE_CHECK_INTERVAL_USEC,
+ ASPEED_PECI_IDLE_CHECK_TIMEOUT_USEC);
+}
+
+static int aspeed_peci_xfer(struct peci_adapter *adapter,
+ struct peci_xfer_msg *msg)
+{
+ struct aspeed_peci *priv = peci_get_adapdata(adapter);
+ long err, timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
+ u32 peci_head, peci_state, rx_data = 0;
+ ulong flags;
+ int i, ret;
+ uint reg;
+
+ if (msg->tx_len > ASPEED_PECI_DATA_BUF_SIZE_MAX ||
+ msg->rx_len > ASPEED_PECI_DATA_BUF_SIZE_MAX)
+ return -EINVAL;
+
+ /* Check command sts and bus idle state */
+ ret = aspeed_peci_check_idle(priv);
+ if (ret)
+ return ret; /* -ETIMEDOUT */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ reinit_completion(&priv->xfer_complete);
+
+	peci_head = FIELD_PREP(ASPEED_PECI_TARGET_ADDR_MASK, msg->addr) |
+ FIELD_PREP(ASPEED_PECI_WRITE_LEN_MASK, msg->tx_len) |
+ FIELD_PREP(ASPEED_PECI_READ_LEN_MASK, msg->rx_len);
+
+ writel(peci_head, priv->base + ASPEED_PECI_RW_LENGTH);
+
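+	/*
+	 * The write data buffer is split across two register banks: bytes 0-15
+	 * land in W_DATA0-W_DATA3 and bytes 16-31 in W_DATA4-W_DATA7.
+	 */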
+ for (i = 0; i < msg->tx_len; i += 4) {
+ reg = i < 16 ? ASPEED_PECI_W_DATA0 + i % 16 :
+ ASPEED_PECI_W_DATA4 + i % 16;
+ writel(le32_to_cpup((__le32 *)&msg->tx_buf[i]),
+ priv->base + reg);
+ }
+
+ dev_dbg(priv->dev, "HEAD : 0x%08x\n", peci_head);
+ print_hex_dump_debug("TX : ", DUMP_PREFIX_NONE, 16, 1,
+ msg->tx_buf, msg->tx_len, true);
+
+ priv->status = 0;
+ writel(ASPEED_PECI_CMD_FIRE, priv->base + ASPEED_PECI_CMD);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ err = wait_for_completion_interruptible_timeout(&priv->xfer_complete,
+ timeout);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ dev_dbg(priv->dev, "INT_STS : 0x%08x\n", priv->status);
+ peci_state = readl(priv->base + ASPEED_PECI_CMD);
+ dev_dbg(priv->dev, "PECI_STATE : 0x%lx\n",
+ FIELD_GET(ASPEED_PECI_CMD_STS_MASK, peci_state));
+
+ writel(0, priv->base + ASPEED_PECI_CMD);
+
+ if (err <= 0 || priv->status != ASPEED_PECI_INT_CMD_DONE) {
+ if (err < 0) { /* -ERESTARTSYS */
+ ret = (int)err;
+ goto err_irqrestore;
+ } else if (err == 0) {
+ dev_dbg(priv->dev, "Timeout waiting for a response!\n");
+ ret = -ETIMEDOUT;
+ goto err_irqrestore;
+ }
+
+ dev_dbg(priv->dev, "No valid response!\n");
+ ret = -EIO;
+ goto err_irqrestore;
+ }
+
+	/*
+	 * rx_len and the rx_buf size can be odd, so unpack the 32-bit read
+	 * data registers into the buffer one byte at a time.
+	 */
+ for (i = 0; i < msg->rx_len; i++) {
+ u8 byte_offset = i % 4;
+
+ if (byte_offset == 0) {
+ reg = i < 16 ? ASPEED_PECI_R_DATA0 + i % 16 :
+ ASPEED_PECI_R_DATA4 + i % 16;
+ rx_data = readl(priv->base + reg);
+ }
+
+ msg->rx_buf[i] = (u8)(rx_data >> (byte_offset << 3));
+ }
+
+ print_hex_dump_debug("RX : ", DUMP_PREFIX_NONE, 16, 1,
+ msg->rx_buf, msg->rx_len, true);
+
+ peci_state = readl(priv->base + ASPEED_PECI_CMD);
+ dev_dbg(priv->dev, "PECI_STATE : 0x%lx\n",
+ FIELD_GET(ASPEED_PECI_CMD_STS_MASK, peci_state));
+ dev_dbg(priv->dev, "------------------------\n");
+
+err_irqrestore:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static irqreturn_t aspeed_peci_irq_handler(int irq, void *arg)
+{
+ struct aspeed_peci *priv = arg;
+ u32 status;
+
+ spin_lock(&priv->lock);
+ status = readl(priv->base + ASPEED_PECI_INT_STS);
+ writel(status, priv->base + ASPEED_PECI_INT_STS);
+ priv->status |= (status & ASPEED_PECI_INT_MASK);
+
+ /*
+ * In most cases, interrupt bits will be set one by one but also note
+ * that multiple interrupt bits could be set at the same time.
+ */
+ if (status & ASPEED_PECI_INT_BUS_TIMEOUT)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_BUS_TIMEOUT\n");
+
+ if (status & ASPEED_PECI_INT_BUS_CONNECT)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_BUS_CONNECT\n");
+
+ if (status & ASPEED_PECI_INT_W_FCS_BAD)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_W_FCS_BAD\n");
+
+ if (status & ASPEED_PECI_INT_W_FCS_ABORT)
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_W_FCS_ABORT\n");
+
+	/*
+	 * Every command should finish with the ASPEED_PECI_INT_CMD_DONE bit
+	 * set, even in an error case.
+	 */
+ if (status & ASPEED_PECI_INT_CMD_DONE) {
+ dev_dbg(priv->dev, "ASPEED_PECI_INT_CMD_DONE\n");
+ complete(&priv->xfer_complete);
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int aspeed_peci_init_ctrl(struct aspeed_peci *priv)
+{
+ u32 msg_timing, addr_timing, rd_sampling_point;
+ u32 clk_freq, clk_divisor, clk_div_val = 0;
+ int ret;
+
+ priv->clk = devm_clk_get(priv->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(priv->dev, "Failed to get clk source.\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable clock.\n");
+ return ret;
+ }
+
+ ret = device_property_read_u32(priv->dev, "clock-frequency", &clk_freq);
+ if (ret) {
+ dev_err(priv->dev,
+ "Could not read clock-frequency property.\n");
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ clk_divisor = clk_get_rate(priv->clk) / clk_freq;
+
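+	/*
+	 * Encode the divisor as a power of two for the 3-bit CLK_DIV field,
+	 * capped at ASPEED_PECI_CLK_DIV_MAX.
+	 */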
+ while ((clk_divisor >>= 1) && (clk_div_val < ASPEED_PECI_CLK_DIV_MAX))
+ clk_div_val++;
+ priv->clk_div_val = clk_div_val;
+
+ ret = device_property_read_u32(priv->dev, "msg-timing", &msg_timing);
+ if (ret || msg_timing > ASPEED_PECI_MSG_TIMING_MAX) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid msg-timing : %u, Use default : %u\n",
+ msg_timing, ASPEED_PECI_MSG_TIMING_DEFAULT);
+ msg_timing = ASPEED_PECI_MSG_TIMING_DEFAULT;
+ }
+ priv->msg_timing = msg_timing;
+
+ ret = device_property_read_u32(priv->dev, "addr-timing", &addr_timing);
+ if (ret || addr_timing > ASPEED_PECI_ADDR_TIMING_MAX) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid addr-timing : %u, Use default : %u\n",
+ addr_timing, ASPEED_PECI_ADDR_TIMING_DEFAULT);
+ addr_timing = ASPEED_PECI_ADDR_TIMING_DEFAULT;
+ }
+ priv->addr_timing = addr_timing;
+
+ ret = device_property_read_u32(priv->dev, "rd-sampling-point",
+ &rd_sampling_point);
+ if (ret || rd_sampling_point > ASPEED_PECI_RD_SAMPLING_POINT_MAX) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid rd-sampling-point : %u. Use default : %u\n",
+ rd_sampling_point,
+ ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT);
+ rd_sampling_point = ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT;
+ }
+ priv->rd_sampling_point = rd_sampling_point;
+
+ ret = device_property_read_u32(priv->dev, "cmd-timeout-ms",
+ &priv->cmd_timeout_ms);
+ if (ret || priv->cmd_timeout_ms > ASPEED_PECI_CMD_TIMEOUT_MS_MAX ||
+ priv->cmd_timeout_ms == 0) {
+ if (!ret)
+ dev_warn(priv->dev,
+ "Invalid cmd-timeout-ms : %u. Use default : %u\n",
+ priv->cmd_timeout_ms,
+ ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT);
+ priv->cmd_timeout_ms = ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT;
+ }
+
+ aspeed_peci_init_regs(priv);
+
+ return 0;
+}
+
+static int aspeed_peci_probe(struct platform_device *pdev)
+{
+ struct peci_adapter *adapter;
+ struct aspeed_peci *priv;
+ int ret;
+
+ adapter = peci_alloc_adapter(&pdev->dev, sizeof(*priv));
+ if (!adapter)
+ return -ENOMEM;
+
+ priv = peci_get_adapdata(adapter);
+ priv->adapter = adapter;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto err_put_adapter_dev;
+ }
+
+ priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		ret = priv->irq;
+ goto err_put_adapter_dev;
+ }
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
+ 0, "peci-aspeed-irq", priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ init_completion(&priv->xfer_complete);
+ spin_lock_init(&priv->lock);
+
+ priv->adapter->owner = THIS_MODULE;
+ priv->adapter->dev.of_node = of_node_get(dev_of_node(priv->dev));
+ strlcpy(priv->adapter->name, pdev->name, sizeof(priv->adapter->name));
+ priv->adapter->xfer = aspeed_peci_xfer;
+ priv->adapter->use_dma = false;
+
+ priv->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(&pdev->dev,
+ "missing or invalid reset controller entry\n");
+ ret = PTR_ERR(priv->rst);
+ goto err_put_adapter_dev;
+ }
+ reset_control_deassert(priv->rst);
+
+ ret = aspeed_peci_init_ctrl(priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ ret = peci_add_adapter(priv->adapter);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ dev_info(&pdev->dev, "peci bus %d registered, irq %d\n",
+ priv->adapter->nr, priv->irq);
+
+ return 0;
+
+err_put_adapter_dev:
+ put_device(&adapter->dev);
+
+ return ret;
+}
+
+static int aspeed_peci_remove(struct platform_device *pdev)
+{
+ struct aspeed_peci *priv = dev_get_drvdata(&pdev->dev);
+
+ peci_del_adapter(priv->adapter);
+ complete(&priv->xfer_complete);
+ clk_disable_unprepare(priv->clk);
+ reset_control_assert(priv->rst);
+ of_node_put(priv->adapter->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_peci_of_table[] = {
+ { .compatible = "aspeed,ast2400-peci", },
+ { .compatible = "aspeed,ast2500-peci", },
+ { .compatible = "aspeed,ast2600-peci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aspeed_peci_of_table);
+
+static struct platform_driver aspeed_peci_driver = {
+ .probe = aspeed_peci_probe,
+ .remove = aspeed_peci_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(aspeed_peci_of_table),
+ },
+};
+module_platform_driver(aspeed_peci_driver);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("ASPEED PECI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/peci/busses/peci-mctp.c b/drivers/peci/busses/peci-mctp.c
new file mode 100644
index 000000000000..56f652c5db48
--- /dev/null
+++ b/drivers/peci/busses/peci-mctp.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Intel Corporation
+
+#include <linux/aspeed-mctp.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/peci.h>
+#include <linux/platform_device.h>
+
+#define PCIE_SET_DATA_LEN(x, val) ((x)->len_lo |= (val))
+#define PCIE_GET_DATA_LEN(x) ((x)->len_lo)
+#define PCIE_GET_PAD_LEN(x) (((x)->tag >> 4) & 0x3)
+#define PCIE_SET_TARGET_ID(x, val) ((x)->target |= (swab16(val)))
+#define PCIE_PKT_ALIGN(x) ALIGN(x, sizeof(u32))
+#define PCIE_GET_REQUESTER_ID(x) (swab16((x)->requester))
+
+/*
+ * PCIe header template in "network format" - Big Endian
+ */
+#define MSG_4DW_HDR_ROUTE_BY_ID 0x72
+#define MSG_CODE_VDM_TYPE_1 0x7f
+#define VENDOR_ID_DMTF_VDM 0xb41a
+static const struct pcie_transport_hdr pcie_hdr_template_be = {
+ .fmt_type = MSG_4DW_HDR_ROUTE_BY_ID,
+ .code = MSG_CODE_VDM_TYPE_1,
+ .vendor = VENDOR_ID_DMTF_VDM
+};
+
+#define MSG_TAG_MASK GENMASK(2, 0)
+#define MCTP_SET_MSG_TAG(x, val) ((x)->flags_seq_tag |= ((val) & MSG_TAG_MASK))
+#define MCTP_GET_MSG_TAG(x) ((x)->flags_seq_tag & MSG_TAG_MASK)
+#define MCTP_HDR_VERSION 1
+#define REQUEST_FLAGS 0xc8
+#define RESPONSE_FLAGS 0xc0
+static const struct mctp_protocol_hdr mctp_hdr_template_be = {
+ .ver = MCTP_HDR_VERSION,
+ .flags_seq_tag = REQUEST_FLAGS
+};
+
+struct mctp_peci_vdm_hdr {
+ u8 type;
+ u16 vendor_id;
+ u8 instance_req_d;
+ u8 vendor_code;
+} __packed;
+
+#define PCIE_VDM_TYPE 0x7e
+#define INTEL_VENDOR_ID 0x8680
+#define PECI_REQUEST 0x80
+#define PECI_RESPONSE 0
+#define PECI_MSG_OPCODE 0x02
+static const struct mctp_peci_vdm_hdr peci_hdr_template = {
+ .type = PCIE_VDM_TYPE,
+ .vendor_id = INTEL_VENDOR_ID,
+ .instance_req_d = PECI_REQUEST,
+ .vendor_code = PECI_MSG_OPCODE
+};
+
+#define PECI_VDM_TYPE 0x0200
+#define PECI_VDM_MASK 0xff00
+
+#define CPUNODEID_CFG_LCLNODEID_MASK GENMASK(2, 0)
+#define CPUNODEID_CFG_OFFSET 0xc0
+#define CPUNODEID_CFG_BUS 0x1e
+#define CPUNODEID_CFG_DEV 0
+#define CPUNODEID_CFG_FUNC 0
+
+struct node_cfg {
+ u8 eid;
+ u16 bdf;
+ u8 domain_id;
+};
+
+struct mctp_peci {
+ struct peci_adapter *adapter;
+ struct device *dev;
+ struct mctp_client *peci_client;
+ struct node_cfg cpus[PECI_OFFSET_MAX][DOMAIN_OFFSET_MAX];
+ bool is_discovery_done;
+ u8 tag;
+};
+
+static void
+prepare_tx_packet(struct mctp_pcie_packet *tx_packet, struct node_cfg *cpu,
+ u8 tx_len, u8 rx_len, u8 *tx_buf, u8 tag)
+{
+ struct pcie_transport_hdr *pcie_hdr;
+ struct mctp_protocol_hdr *mctp_hdr;
+ struct mctp_peci_vdm_hdr *peci_hdr;
+ u8 *peci_payload;
+ u32 payload_len, payload_len_dw;
+
+ BUILD_BUG_ON((sizeof(struct pcie_transport_hdr) +
+ sizeof(struct mctp_protocol_hdr)) != PCIE_VDM_HDR_SIZE);
+
+ pcie_hdr = (struct pcie_transport_hdr *)tx_packet;
+ *pcie_hdr = pcie_hdr_template_be;
+
+ mctp_hdr = (struct mctp_protocol_hdr *)&tx_packet->data.hdr[3];
+ *mctp_hdr = mctp_hdr_template_be;
+
+ peci_hdr = (struct mctp_peci_vdm_hdr *)tx_packet->data.payload;
+ *peci_hdr = peci_hdr_template;
+
+ peci_payload = (u8 *)(tx_packet->data.payload) + sizeof(struct mctp_peci_vdm_hdr);
+ peci_payload[0] = tx_len;
+ peci_payload[1] = rx_len;
+ memcpy(&peci_payload[2], tx_buf, tx_len);
+
+ /*
+ * MCTP packet payload consists of PECI VDM header, WL, RL and actual
+ * PECI payload
+ */
+ payload_len = sizeof(struct mctp_peci_vdm_hdr) + 2 + tx_len;
+ payload_len_dw = PCIE_PKT_ALIGN(payload_len) / sizeof(u32);
+
+ PCIE_SET_DATA_LEN(pcie_hdr, payload_len_dw);
+
+ tx_packet->size = PCIE_PKT_ALIGN(payload_len) + PCIE_VDM_HDR_SIZE;
+
+ mctp_hdr->dest = cpu->eid;
+ PCIE_SET_TARGET_ID(pcie_hdr, cpu->bdf);
+ MCTP_SET_MSG_TAG(mctp_hdr, tag);
+}
+
+static int
+verify_rx_packet(struct peci_adapter *adapter, struct mctp_pcie_packet *rx_packet,
+ struct node_cfg *cpu, u8 tag)
+{
+ struct mctp_peci *priv = peci_get_adapdata(adapter);
+ bool invalid_packet = false;
+ struct pcie_transport_hdr *pcie_hdr;
+ struct mctp_protocol_hdr *mctp_hdr;
+ struct mctp_peci_vdm_hdr *peci_hdr;
+ u8 expected_flags;
+ u16 requester_id;
+
+ expected_flags = (RESPONSE_FLAGS | (tag & MSG_TAG_MASK));
+
+ pcie_hdr = (struct pcie_transport_hdr *)rx_packet;
+ mctp_hdr = (struct mctp_protocol_hdr *)&rx_packet->data.hdr[3];
+ peci_hdr = (struct mctp_peci_vdm_hdr *)rx_packet->data.payload;
+
+ requester_id = PCIE_GET_REQUESTER_ID(pcie_hdr);
+
+ if (requester_id != cpu->bdf) {
+ dev_dbg(priv->dev,
+ "mismatch in src bdf: expected: 0x%.4x, got: 0x%.4x",
+ cpu->bdf, requester_id);
+ invalid_packet = true;
+ }
+ if (mctp_hdr->src != cpu->eid) {
+ dev_dbg(priv->dev,
+ "mismatch in src eid: expected: 0x%.2x, got: 0x%.2x",
+ cpu->eid, mctp_hdr->src);
+ invalid_packet = true;
+ }
+ if (mctp_hdr->flags_seq_tag != expected_flags) {
+ dev_dbg(priv->dev,
+ "mismatch in mctp flags: expected: 0x%.2x, got: 0x%.2x",
+ expected_flags, mctp_hdr->flags_seq_tag);
+ invalid_packet = true;
+ }
+ if (peci_hdr->instance_req_d != PECI_RESPONSE) {
+ dev_dbg(priv->dev,
+ "packet doesn't match a response: expected: 0x%.2x, got: 0x%.2x",
+ PECI_RESPONSE, peci_hdr->instance_req_d);
+ invalid_packet = true;
+ }
+
+ if (invalid_packet) {
+ dev_warn_ratelimited(priv->dev, "unexpected peci response found\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct mctp_pcie_packet *
+mctp_peci_send_receive(struct peci_adapter *adapter, struct node_cfg *cpu,
+ u8 tx_len, u8 rx_len, u8 *tx_buf)
+{
+ struct mctp_peci *priv = peci_get_adapdata(adapter);
+	/* XXX: Sporadically it can take up to 1100 ms for a response to arrive */
+ unsigned long timeout = msecs_to_jiffies(1100);
+ u8 tag = priv->tag;
+ struct mctp_pcie_packet *tx_packet, *rx_packet;
+ unsigned long current_time, end_time;
+ struct pcie_transport_hdr *pcie_hdr;
+ u32 payload_len, rx_packet_size;
+ int ret;
+
+ tx_packet = aspeed_mctp_packet_alloc(GFP_KERNEL);
+ if (!tx_packet)
+ return ERR_PTR(-ENOMEM);
+
+ prepare_tx_packet(tx_packet, cpu, tx_len, rx_len, tx_buf, tag);
+
+ aspeed_mctp_flush_rx_queue(priv->peci_client);
+
+ print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, &tx_packet->data, tx_packet->size);
+
+ ret = aspeed_mctp_send_packet(priv->peci_client, tx_packet);
+ if (ret) {
+ dev_dbg_ratelimited(priv->dev, "failed to send mctp packet: %d\n", ret);
+ aspeed_mctp_packet_free(tx_packet);
+ return ERR_PTR(ret);
+ }
+ priv->tag++;
+
+ end_time = jiffies + timeout;
+retry:
+ rx_packet = aspeed_mctp_receive_packet(priv->peci_client, timeout);
+ if (IS_ERR(rx_packet)) {
+ if (PTR_ERR(rx_packet) != -ERESTARTSYS)
+ dev_err_ratelimited(priv->dev, "failed to receive mctp packet: %ld\n",
+ PTR_ERR(rx_packet));
+
+ return rx_packet;
+ }
+ BUG_ON(!rx_packet);
+
+ ret = verify_rx_packet(adapter, rx_packet, cpu, tag);
+ current_time = jiffies;
+ if (ret && time_before(current_time, end_time)) {
+ aspeed_mctp_packet_free(rx_packet);
+ timeout = ((long)end_time - (long)current_time);
+ goto retry;
+ }
+
+ pcie_hdr = (struct pcie_transport_hdr *)rx_packet;
+ payload_len = PCIE_GET_DATA_LEN(pcie_hdr) * sizeof(u32) - PCIE_GET_PAD_LEN(pcie_hdr);
+ rx_packet_size = payload_len + PCIE_VDM_HDR_SIZE;
+ print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, &rx_packet->data, rx_packet_size);
+
+ return rx_packet;
+}
+
+static void mctp_peci_cpu_discovery(struct peci_adapter *adapter)
+{
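+	/* Candidate endpoint IDs tried for each possible CPU socket position. */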
+ const u8 eids[] = { 0x1d, 0x3d, 0x5d, 0x7d, 0x9d, 0xbd, 0xdd, 0xfd };
+ struct mctp_peci *priv = peci_get_adapdata(adapter);
+ u8 tx_buf[PECI_RDENDPTCFG_PCI_WRITE_LEN];
+ struct mctp_pcie_packet *rx_packet;
+ struct node_cfg cpu;
+ int i, domain_id, node_id, ret;
+ bool is_discovery_done = false;
+ u8 *rx_buf;
+ u32 addr;
+
+ addr = CPUNODEID_CFG_OFFSET; /* [11:0] offset */
+	addr |= CPUNODEID_CFG_FUNC << 12; /* [14:12] function */
+ addr |= CPUNODEID_CFG_DEV << 15; /* [19:15] device */
+ addr |= CPUNODEID_CFG_BUS << 20; /* [27:20] bus, [31:28] reserved */
+
+ tx_buf[0] = PECI_RDENDPTCFG_CMD;
+ tx_buf[1] = 0;
+ tx_buf[2] = PECI_ENDPTCFG_TYPE_LOCAL_PCI;
+ tx_buf[3] = 0; /* Endpoint ID */
+ tx_buf[4] = 0; /* Reserved */
+ tx_buf[5] = 0; /* Reserved */
+ tx_buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI;
+ tx_buf[7] = 0; /* PCI Segment */
+ tx_buf[8] = (u8)addr;
+ tx_buf[9] = (u8)(addr >> 8);
+ tx_buf[10] = (u8)(addr >> 16);
+ tx_buf[11] = (u8)(addr >> 24);
+
+ for (i = 0; i < PECI_OFFSET_MAX; i++) {
+ memset(&cpu, 0, sizeof(cpu));
+ cpu.eid = eids[i];
+ ret = aspeed_mctp_get_eid_bdf(priv->peci_client, cpu.eid, &cpu.bdf);
+ if (ret)
+ continue;
+
+ for (domain_id = 0; domain_id < DOMAIN_OFFSET_MAX; domain_id++) {
+ ret = aspeed_mctp_get_eid(priv->peci_client,
+ cpu.bdf, domain_id,
+ &cpu.eid);
+
+ /* No entries for specific BDF/domain_Id. */
+ if (ret)
+ continue;
+
+ rx_packet = mctp_peci_send_receive(adapter, &cpu,
+ PECI_RDENDPTCFG_PCI_WRITE_LEN,
+ PECI_RDENDPTCFG_READ_LEN_BASE + 4,
+ tx_buf);
+
+ if (IS_ERR(rx_packet)) {
+ dev_warn(priv->dev, "Device EID=%d DomainId=%d not discovered\n",
+ cpu.eid, cpu.domain_id);
+ continue;
+ }
+
+ rx_buf = (u8 *)(rx_packet->data.payload) + sizeof(struct mctp_peci_vdm_hdr);
+ node_id = rx_buf[1] & CPUNODEID_CFG_LCLNODEID_MASK;
+ if (node_id < PECI_OFFSET_MAX) {
+ is_discovery_done = true;
+ priv->cpus[node_id][domain_id] = cpu;
+ } else {
+ dev_warn(priv->dev, "Incorrect node_id=%d (EID=%d DomainId=%d)\n",
+ node_id, cpu.eid, cpu.domain_id);
+ }
+ aspeed_mctp_packet_free(rx_packet);
+ }
+ }
+ priv->is_discovery_done = is_discovery_done;
+}
+
+static int
+mctp_peci_get_address(struct peci_adapter *adapter, u8 peci_addr, u8 domain_id,
+ struct node_cfg *cpu)
+{
+ struct mctp_peci *priv = peci_get_adapdata(adapter);
+ int node_id = peci_addr - 0x30;
+
+ /*
+ * XXX: Is it possible we're able to communicate with CPU 0 before other
+ * CPUs are up? Make sure we're always discovering all CPUs.
+ */
+ if (!priv->is_discovery_done)
+ mctp_peci_cpu_discovery(adapter);
+
+ if (node_id < PECI_OFFSET_MAX && domain_id < DOMAIN_OFFSET_MAX &&
+ priv->is_discovery_done && priv->cpus[node_id][domain_id].eid) {
+ *cpu = priv->cpus[node_id][domain_id];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int
+mctp_peci_xfer(struct peci_adapter *adapter, struct peci_xfer_msg *msg)
+{
+ u32 max_len = sizeof(struct mctp_pcie_packet_data) -
+ PCIE_VDM_HDR_SIZE - sizeof(struct mctp_peci_vdm_hdr);
+ struct mctp_pcie_packet *rx_packet;
+ struct node_cfg cpu;
+ u8 domain_id = 0;
+ int ret;
+
+ if (msg->tx_len > max_len || msg->rx_len > max_len)
+ return -EINVAL;
+
+ if (msg->tx_len > 2)
+ domain_id = (msg->tx_buf[1] >> 1);
+
+ ret = mctp_peci_get_address(adapter, msg->addr, domain_id, &cpu);
+ if (ret)
+ return ret;
+
+ rx_packet = mctp_peci_send_receive(adapter, &cpu, msg->tx_len, msg->rx_len, msg->tx_buf);
+ if (IS_ERR(rx_packet))
+ return PTR_ERR(rx_packet);
+
+ memcpy(msg->rx_buf,
+ (u8 *)(rx_packet->data.payload) + sizeof(struct mctp_peci_vdm_hdr),
+ msg->rx_len);
+
+ aspeed_mctp_packet_free(rx_packet);
+
+ return 0;
+}
+
+static int mctp_peci_init_peci_client(struct mctp_peci *priv)
+{
+ struct device *parent = priv->dev->parent;
+ int ret;
+
+ priv->peci_client = aspeed_mctp_create_client(dev_get_drvdata(parent));
+ if (IS_ERR(priv->peci_client))
+ return -ENOMEM;
+
+ ret = aspeed_mctp_add_type_handler(priv->peci_client, PCIE_VDM_TYPE,
+ INTEL_VENDOR_ID, PECI_VDM_TYPE,
+ PECI_VDM_MASK);
+ if (ret)
+ aspeed_mctp_delete_client(priv->peci_client);
+
+ return ret;
+}
+
+static int mctp_peci_probe(struct platform_device *pdev)
+{
+ struct peci_adapter *adapter;
+ struct mctp_peci *priv;
+ int ret;
+
+ adapter = peci_alloc_adapter(&pdev->dev, sizeof(*priv));
+ if (!adapter)
+ return -ENOMEM;
+
+ priv = peci_get_adapdata(adapter);
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ adapter->owner = THIS_MODULE;
+ strlcpy(adapter->name, pdev->name, sizeof(adapter->name));
+
+ adapter->xfer = mctp_peci_xfer;
+ adapter->peci_revision = 0x41;
+
+ priv->adapter = adapter;
+
+ ret = mctp_peci_init_peci_client(priv);
+ if (ret)
+ goto out_put_device;
+
+ ret = peci_add_adapter(adapter);
+ if (ret)
+ goto out_del_client;
+
+ return 0;
+
+out_del_client:
+ aspeed_mctp_delete_client(priv->peci_client);
+out_put_device:
+ put_device(&adapter->dev);
+ return ret;
+}
+
+static int mctp_peci_remove(struct platform_device *pdev)
+{
+ struct mctp_peci *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!priv)
+ goto out;
+
+ aspeed_mctp_delete_client(priv->peci_client);
+
+ peci_del_adapter(priv->adapter);
+out:
+ return 0;
+}
+
+static struct platform_driver mctp_peci_driver = {
+ .probe = mctp_peci_probe,
+ .remove = mctp_peci_remove,
+ .driver = {
+ .name = "peci-mctp",
+ },
+};
+module_platform_driver(mctp_peci_driver);
+
+MODULE_ALIAS("platform:peci-mctp");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
+MODULE_DESCRIPTION("PECI MCTP driver");
diff --git a/drivers/peci/busses/peci-npcm.c b/drivers/peci/busses/peci-npcm.c
new file mode 100644
index 000000000000..bdebbf1ec7f1
--- /dev/null
+++ b/drivers/peci/busses/peci-npcm.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/peci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+
+/* NPCM7xx GCR module */
+#define NPCM7XX_INTCR3_OFFSET 0x9C
+#define NPCM7XX_INTCR3_PECIVSEL BIT(19)
+
+/* NPCM PECI Registers */
+#define NPCM_PECI_CTL_STS 0x00
+#define NPCM_PECI_RD_LENGTH 0x04
+#define NPCM_PECI_ADDR 0x08
+#define NPCM_PECI_CMD 0x0C
+#define NPCM_PECI_CTL2 0x10
+#define NPCM_PECI_WR_LENGTH 0x1C
+#define NPCM_PECI_PDDR 0x2C
+#define NPCM_PECI_DAT_INOUT(n) (0x100 + ((n) * 4))
+
+#define NPCM_PECI_MAX_REG 0x200
+
+/* NPCM_PECI_CTL_STS - 0x00 : Control Register */
+#define NPCM_PECI_CTRL_DONE_INT_EN BIT(6)
+#define NPCM_PECI_CTRL_ABRT_ERR BIT(4)
+#define NPCM_PECI_CTRL_CRC_ERR BIT(3)
+#define NPCM_PECI_CTRL_DONE BIT(1)
+#define NPCM_PECI_CTRL_START_BUSY BIT(0)
+
+/* NPCM_PECI_RD_LENGTH - 0x04 : Command Register */
+#define NPCM_PECI_RD_LEN_MASK GENMASK(6, 0)
+
+/* NPCM_PECI_CMD - 0x10 : Command Register */
+#define NPCM_PECI_CTL2_MASK GENMASK(7, 6)
+
+/* NPCM_PECI_WR_LENGTH - 0x1C : Command Register */
+#define NPCM_PECI_WR_LEN_MASK GENMASK(6, 0)
+
+/* NPCM_PECI_PDDR - 0x2C : Command Register */
+#define NPCM_PECI_PDDR_MASK GENMASK(4, 0)
+
+#define NPCM_PECI_INT_MASK \
+ (NPCM_PECI_CTRL_ABRT_ERR | NPCM_PECI_CTRL_CRC_ERR | NPCM_PECI_CTRL_DONE)
+
+#define NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC 50000
+#define NPCM_PECI_IDLE_CHECK_INTERVAL_USEC 10000
+#define NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
+#define NPCM_PECI_CMD_TIMEOUT_MS_MAX 60000
+#define NPCM_PECI_HOST_NEG_BIT_RATE_MAX 31
+#define NPCM_PECI_HOST_NEG_BIT_RATE_MIN 7
+#define NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT 15
+#define NPCM_PECI_PULL_DOWN_DEFAULT 0
+#define NPCM_PECI_PULL_DOWN_MAX 2
+
+struct npcm_peci {
+ u32 cmd_timeout_ms;
+ u32 host_bit_rate;
+ struct completion xfer_complete;
+ struct regmap *gcr_regmap;
+ struct peci_adapter *adapter;
+ struct regmap *regmap;
+ u32 status;
+ spinlock_t lock; /* to sync completion status handling */
+ struct device *dev;
+ struct clk *clk;
+ int irq;
+};
+
+static int npcm_peci_xfer_native(struct npcm_peci *priv,
+ struct peci_xfer_msg *msg)
+{
+ long err, timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
+ unsigned long flags;
+ unsigned int msg_rd;
+ u32 cmd_sts;
+ int i, rc;
+
+ /* Check command sts and bus idle state */
+ rc = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts,
+ !(cmd_sts & NPCM_PECI_CTRL_START_BUSY),
+ NPCM_PECI_IDLE_CHECK_INTERVAL_USEC,
+ NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC);
+ if (rc)
+ return rc; /* -ETIMEDOUT */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ reinit_completion(&priv->xfer_complete);
+
+ regmap_write(priv->regmap, NPCM_PECI_ADDR, msg->addr);
+ regmap_write(priv->regmap, NPCM_PECI_RD_LENGTH,
+ NPCM_PECI_WR_LEN_MASK & msg->rx_len);
+ regmap_write(priv->regmap, NPCM_PECI_WR_LENGTH,
+ NPCM_PECI_WR_LEN_MASK & msg->tx_len);
+
+ if (msg->tx_len) {
+ regmap_write(priv->regmap, NPCM_PECI_CMD, msg->tx_buf[0]);
+
+ for (i = 0; i < (msg->tx_len - 1); i++)
+ regmap_write(priv->regmap, NPCM_PECI_DAT_INOUT(i),
+ msg->tx_buf[i + 1]);
+ }
+
+ priv->status = 0;
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS,
+ NPCM_PECI_CTRL_START_BUSY,
+ NPCM_PECI_CTRL_START_BUSY);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ err = wait_for_completion_interruptible_timeout(&priv->xfer_complete,
+ timeout);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ regmap_write(priv->regmap, NPCM_PECI_CMD, 0);
+
+ if (err <= 0 || priv->status != NPCM_PECI_CTRL_DONE) {
+ if (err < 0) { /* -ERESTARTSYS */
+ rc = (int)err;
+ goto err_irqrestore;
+ } else if (err == 0) {
+ dev_dbg(priv->dev, "Timeout waiting for a response!\n");
+ rc = -ETIMEDOUT;
+ goto err_irqrestore;
+ }
+
+ dev_dbg(priv->dev, "No valid response!\n");
+ rc = -EIO;
+ goto err_irqrestore;
+ }
+
+ for (i = 0; i < msg->rx_len; i++) {
+ regmap_read(priv->regmap, NPCM_PECI_DAT_INOUT(i), &msg_rd);
+ msg->rx_buf[i] = (u8)msg_rd;
+ }
+
+err_irqrestore:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return rc;
+}
+
+static irqreturn_t npcm_peci_irq_handler(int irq, void *arg)
+{
+ struct npcm_peci *priv = arg;
+ u32 status_ack = 0;
+ u32 status;
+
+ spin_lock(&priv->lock);
+ regmap_read(priv->regmap, NPCM_PECI_CTL_STS, &status);
+ priv->status |= (status & NPCM_PECI_INT_MASK);
+
+ if (status & NPCM_PECI_CTRL_CRC_ERR) {
+ dev_dbg(priv->dev, "PECI_INT_W_FCS_BAD\n");
+ status_ack |= NPCM_PECI_CTRL_CRC_ERR;
+ }
+
+ if (status & NPCM_PECI_CTRL_ABRT_ERR) {
+ dev_dbg(priv->dev, "NPCM_PECI_CTRL_ABRT_ERR\n");
+ status_ack |= NPCM_PECI_CTRL_ABRT_ERR;
+ }
+
+	/*
+	 * Every command should finish with the NPCM_PECI_CTRL_DONE bit set,
+	 * even in an error case.
+	 */
+ if (status & NPCM_PECI_CTRL_DONE) {
+ dev_dbg(priv->dev, "NPCM_PECI_CTRL_DONE\n");
+ status_ack |= NPCM_PECI_CTRL_DONE;
+ complete(&priv->xfer_complete);
+ }
+
+ regmap_write_bits(priv->regmap, NPCM_PECI_CTL_STS,
+ NPCM_PECI_INT_MASK, status_ack);
+
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
+static int npcm_peci_init_ctrl(struct npcm_peci *priv)
+{
+ u32 cmd_sts, host_neg_bit_rate = 0, pull_down = 0;
+ int ret;
+
+ priv->clk = devm_clk_get(priv->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(priv->dev, "Failed to get clk source.\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(priv->dev, "Failed to enable clock.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(priv->dev->of_node, "cmd-timeout-ms",
+ &priv->cmd_timeout_ms);
+ if (ret || priv->cmd_timeout_ms > NPCM_PECI_CMD_TIMEOUT_MS_MAX ||
+ priv->cmd_timeout_ms == 0) {
+ if (ret)
+ dev_warn(priv->dev,
+ "cmd-timeout-ms not found, use default : %u\n",
+ NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT);
+ else
+ dev_warn(priv->dev,
+ "Invalid cmd-timeout-ms : %u. Use default : %u\n",
+ priv->cmd_timeout_ms,
+ NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT);
+
+ priv->cmd_timeout_ms = NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT;
+ }
+
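+	/*
+	 * On NPCM750, the PECI I/O voltage range is selected through the
+	 * INTCR3 PECIVSEL bit in the GCR block, based on the optional
+	 * "high-volt-range" property.
+	 */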
+ if (of_device_is_compatible(priv->dev->of_node,
+ "nuvoton,npcm750-peci")) {
+ priv->gcr_regmap = syscon_regmap_lookup_by_compatible
+ ("nuvoton,npcm750-gcr");
+ if (!IS_ERR(priv->gcr_regmap)) {
+ bool volt = of_property_read_bool(priv->dev->of_node,
+ "high-volt-range");
+ if (volt)
+ regmap_update_bits(priv->gcr_regmap,
+ NPCM7XX_INTCR3_OFFSET,
+ NPCM7XX_INTCR3_PECIVSEL,
+ NPCM7XX_INTCR3_PECIVSEL);
+ else
+ regmap_update_bits(priv->gcr_regmap,
+ NPCM7XX_INTCR3_OFFSET,
+ NPCM7XX_INTCR3_PECIVSEL, 0);
+ }
+ }
+
+ ret = of_property_read_u32(priv->dev->of_node, "pull-down",
+ &pull_down);
+ if (ret || pull_down > NPCM_PECI_PULL_DOWN_MAX) {
+ if (ret)
+ dev_warn(priv->dev,
+ "pull-down not found, use default : %u\n",
+ NPCM_PECI_PULL_DOWN_DEFAULT);
+ else
+ dev_warn(priv->dev,
+ "Invalid pull-down : %u. Use default : %u\n",
+ pull_down,
+ NPCM_PECI_PULL_DOWN_DEFAULT);
+ pull_down = NPCM_PECI_PULL_DOWN_DEFAULT;
+ }
+
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL2, NPCM_PECI_CTL2_MASK,
+ pull_down << 6);
+
+ ret = of_property_read_u32(priv->dev->of_node, "host-neg-bit-rate",
+ &host_neg_bit_rate);
+ if (ret || host_neg_bit_rate > NPCM_PECI_HOST_NEG_BIT_RATE_MAX ||
+ host_neg_bit_rate < NPCM_PECI_HOST_NEG_BIT_RATE_MIN) {
+ if (ret)
+ dev_warn(priv->dev,
+ "host-neg-bit-rate not found, use default : %u\n",
+ NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT);
+ else
+ dev_warn(priv->dev,
+ "Invalid host-neg-bit-rate : %u. Use default : %u\n",
+ host_neg_bit_rate,
+ NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT);
+ host_neg_bit_rate = NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT;
+ }
+
+ regmap_update_bits(priv->regmap, NPCM_PECI_PDDR, NPCM_PECI_PDDR_MASK,
+ host_neg_bit_rate);
+
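+	/* Resulting bit rate in Hz: source clock divided by 4 * (PDDR + 1). */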
+ priv->host_bit_rate = clk_get_rate(priv->clk) /
+ (4 * (host_neg_bit_rate + 1));
+
+ ret = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts,
+ !(cmd_sts & NPCM_PECI_CTRL_START_BUSY),
+ NPCM_PECI_IDLE_CHECK_INTERVAL_USEC,
+ NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC);
+ if (ret)
+ return ret; /* -ETIMEDOUT */
+
+ /* PECI interrupt enable */
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS,
+ NPCM_PECI_CTRL_DONE_INT_EN,
+ NPCM_PECI_CTRL_DONE_INT_EN);
+
+ return 0;
+}
+
+static const struct regmap_config npcm_peci_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = NPCM_PECI_MAX_REG,
+ .fast_io = true,
+};
+
+static int npcm_peci_xfer(struct peci_adapter *adapter,
+ struct peci_xfer_msg *msg)
+{
+ struct npcm_peci *priv = peci_get_adapdata(adapter);
+
+ return npcm_peci_xfer_native(priv, msg);
+}
+
+static int npcm_peci_probe(struct platform_device *pdev)
+{
+ struct peci_adapter *adapter;
+ struct npcm_peci *priv;
+ void __iomem *base;
+ int ret;
+
+ adapter = peci_alloc_adapter(&pdev->dev, sizeof(*priv));
+ if (!adapter)
+ return -ENOMEM;
+
+ priv = peci_get_adapdata(adapter);
+ priv->adapter = adapter;
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_put_adapter_dev;
+ }
+
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &npcm_peci_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ goto err_put_adapter_dev;
+ }
+
+ priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		ret = priv->irq;
+ goto err_put_adapter_dev;
+ }
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, npcm_peci_irq_handler,
+ 0, "peci-npcm-irq", priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ init_completion(&priv->xfer_complete);
+ spin_lock_init(&priv->lock);
+
+ priv->adapter->owner = THIS_MODULE;
+ priv->adapter->dev.of_node = of_node_get(dev_of_node(priv->dev));
+ strlcpy(priv->adapter->name, pdev->name, sizeof(priv->adapter->name));
+ priv->adapter->xfer = npcm_peci_xfer;
+
+ ret = npcm_peci_init_ctrl(priv);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ ret = peci_add_adapter(priv->adapter);
+ if (ret)
+ goto err_put_adapter_dev;
+
+ dev_info(&pdev->dev, "peci bus %d registered, host negotiation bit rate %dHz",
+ priv->adapter->nr, priv->host_bit_rate);
+
+ return 0;
+
+err_put_adapter_dev:
+ put_device(&adapter->dev);
+ return ret;
+}
+
+static int npcm_peci_remove(struct platform_device *pdev)
+{
+ struct npcm_peci *priv = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+ peci_del_adapter(priv->adapter);
+ of_node_put(priv->adapter->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_peci_of_table[] = {
+ { .compatible = "nuvoton,npcm750-peci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm_peci_of_table);
+
+static struct platform_driver npcm_peci_driver = {
+ .probe = npcm_peci_probe,
+ .remove = npcm_peci_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(npcm_peci_of_table),
+ },
+};
+module_platform_driver(npcm_peci_driver);
+
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_DESCRIPTION("NPCM Platform Environment Control Interface (PECI) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/peci/peci-core.c b/drivers/peci/peci-core.c
new file mode 100644
index 000000000000..1f47ab749fb6
--- /dev/null
+++ b/drivers/peci/peci-core.c
@@ -0,0 +1,2158 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/peci.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched/task_stack.h>
+#include <linux/slab.h>
+
+/* Mask for getting minor revision number from DIB */
+#define REVISION_NUM_MASK GENMASK(15, 8)
+
+/* CRC8 table for Assured Write Frame Check */
+#define PECI_CRC8_POLYNOMIAL 0x07
+DECLARE_CRC8_TABLE(peci_crc8_table);
+
+static bool is_registered;
+
+static DEFINE_MUTEX(core_lock);
+static DEFINE_IDR(peci_adapter_idr);
+
+struct peci_adapter *peci_get_adapter(int nr)
+{
+ struct peci_adapter *adapter;
+
+ mutex_lock(&core_lock);
+ adapter = idr_find(&peci_adapter_idr, nr);
+ if (!adapter)
+ goto out_unlock;
+
+ if (try_module_get(adapter->owner))
+ get_device(&adapter->dev);
+ else
+ adapter = NULL;
+
+out_unlock:
+ mutex_unlock(&core_lock);
+
+ return adapter;
+}
+EXPORT_SYMBOL_GPL(peci_get_adapter);
+
+void peci_put_adapter(struct peci_adapter *adapter)
+{
+ if (!adapter)
+ return;
+
+ put_device(&adapter->dev);
+ module_put(adapter->owner);
+}
+EXPORT_SYMBOL_GPL(peci_put_adapter);
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", dev->type == &peci_client_type ?
+ to_peci_client(dev)->name : to_peci_adapter(dev)->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static void peci_client_dev_release(struct device *dev)
+{
+ struct peci_client *client = to_peci_client(dev);
+
+ dev_dbg(dev, "%s: %s\n", __func__, client->name);
+ peci_put_adapter(client->adapter);
+ kfree(client);
+}
+
+static struct attribute *peci_device_attrs[] = {
+ &dev_attr_name.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(peci_device);
+
+struct device_type peci_client_type = {
+ .groups = peci_device_groups,
+ .release = peci_client_dev_release,
+};
+EXPORT_SYMBOL_GPL(peci_client_type);
+
+/**
+ * peci_verify_client - return parameter as peci_client, or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * Return: pointer to peci_client on success, else NULL.
+ */
+struct peci_client *peci_verify_client(struct device *dev)
+{
+ return (dev->type == &peci_client_type)
+ ? to_peci_client(dev)
+ : NULL;
+}
+EXPORT_SYMBOL_GPL(peci_verify_client);
+
+/**
+ * peci_get_xfer_msg() - get a DMA safe peci_xfer_msg for the given tx and rx
+ * length
+ * @tx_len: the length of tx_buf. May be 0 if tx_buf isn't needed.
+ * @rx_len: the length of rx_buf. May be 0 if rx_buf isn't needed.
+ *
+ * Return: NULL if a DMA safe buffer was not obtained.
+ * Or a valid pointer to be used with DMA. After use, release it by
+ * calling peci_put_xfer_msg().
+ *
+ * This function must only be called from process context!
+ */
+struct peci_xfer_msg *peci_get_xfer_msg(u8 tx_len, u8 rx_len)
+{
+ struct peci_xfer_msg *msg;
+ u8 *tx_buf, *rx_buf;
+
+ if (tx_len) {
+ tx_buf = kzalloc(tx_len, GFP_KERNEL);
+ if (!tx_buf)
+ return NULL;
+ } else {
+ tx_buf = NULL;
+ }
+
+ if (rx_len) {
+ rx_buf = kzalloc(rx_len, GFP_KERNEL);
+ if (!rx_buf)
+ goto err_free_tx_buf;
+ } else {
+ rx_buf = NULL;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ goto err_free_tx_rx_buf;
+
+ msg->tx_len = tx_len;
+ msg->tx_buf = tx_buf;
+ msg->rx_len = rx_len;
+ msg->rx_buf = rx_buf;
+
+ return msg;
+
+err_free_tx_rx_buf:
+ kfree(rx_buf);
+err_free_tx_buf:
+ kfree(tx_buf);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(peci_get_xfer_msg);
+
+/**
+ * peci_put_xfer_msg - release a DMA safe peci_xfer_msg
+ * @msg: the message obtained from peci_get_xfer_msg(). May be NULL.
+ */
+void peci_put_xfer_msg(struct peci_xfer_msg *msg)
+{
+ if (!msg)
+ return;
+
+ kfree(msg->rx_buf);
+ kfree(msg->tx_buf);
+ kfree(msg);
+}
+EXPORT_SYMBOL_GPL(peci_put_xfer_msg);
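+
+/*
+ * A minimal usage sketch, mirroring the GetDIB() transfer that
+ * peci_scan_cmd_mask() below builds:
+ *
+ *   struct peci_xfer_msg *msg;
+ *
+ *   msg = peci_get_xfer_msg(PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
+ *   if (!msg)
+ *           return -ENOMEM;
+ *
+ *   msg->addr = PECI_BASE_ADDR;
+ *   msg->tx_buf[0] = PECI_GET_DIB_CMD;
+ *   ... hand msg to the adapter, consume msg->rx_buf, then ...
+ *   peci_put_xfer_msg(msg);
+ */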
+
+/* Calculate an Assured Write Frame Check Sequence byte */
+static int peci_aw_fcs(struct peci_xfer_msg *msg, int len, u8 *aw_fcs)
+{
+ u8 *tmp_buf;
+
+ /* Allocate a temporary buffer to build a contiguous byte array */
+ tmp_buf = kmalloc(len, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ tmp_buf[0] = msg->addr;
+ tmp_buf[1] = msg->tx_len;
+ tmp_buf[2] = msg->rx_len;
+ memcpy(&tmp_buf[3], msg->tx_buf, len - 3);
+
+ *aw_fcs = crc8(peci_crc8_table, tmp_buf, (size_t)len, 0);
+
+ kfree(tmp_buf);
+
+ return 0;
+}
+
+static int __peci_xfer(struct peci_adapter *adapter, struct peci_xfer_msg *msg,
+ bool do_retry, bool has_aw_fcs)
+{
+ uint interval_us = PECI_DEV_RETRY_INTERVAL_MIN_USEC;
+ char task_name[TASK_COMM_LEN];
+ ulong timeout = jiffies;
+ u8 aw_fcs;
+ int ret;
+
+ /*
+ * If the adapter uses DMA, check here whether the tx and rx buffers
+ * are DMA capable.
+ */
+ if (IS_ENABLED(CONFIG_HAS_DMA) && adapter->use_dma) {
+ if (is_vmalloc_addr(msg->tx_buf) ||
+ is_vmalloc_addr(msg->rx_buf)) {
+ WARN_ONCE(1, "xfer msg is not dma capable\n");
+ return -EAGAIN;
+ } else if (object_is_on_stack(msg->tx_buf) ||
+ object_is_on_stack(msg->rx_buf)) {
+ WARN_ONCE(1, "xfer msg is on stack\n");
+ return -EAGAIN;
+ }
+ }
+
+ get_task_comm(task_name, current);
+ dev_dbg(&adapter->dev, "%s is called by %s(%d) through %s\n",
+ __func__, task_name, current->pid, adapter->name);
+
+ /*
+ * For some commands, the PECI originator may need to retry a command if
+ * the processor PECI client responds with a 0x8x completion code. In
+ * each instance, the processor PECI client may have started the
+ * operation but not completed it yet. When the 'retry' bit is set, the
+ * PECI client will ignore a new request if it exactly matches a
+ * previous valid request. To improve performance and reduce retry
+ * traffic, the retry interval is increased exponentially.
+ */
+
+ if (do_retry)
+ timeout += PECI_DEV_RETRY_TIMEOUT;
+
+ for (;;) {
+ ret = adapter->xfer(adapter, msg);
+
+ if (!do_retry || ret || !msg->rx_buf)
+ break;
+
+ /* Retry is needed when completion code is 0x8x */
+ if ((msg->rx_buf[0] & PECI_DEV_CC_RETRY_CHECK_MASK) !=
+ PECI_DEV_CC_NEED_RETRY)
+ break;
+
+ /* Set the retry bit to indicate a retry attempt */
+ msg->tx_buf[1] |= PECI_DEV_RETRY_BIT;
+
+ /* Recalculate the AW FCS if it has one */
+ if (has_aw_fcs) {
+ ret = peci_aw_fcs(msg, 2 + msg->tx_len, &aw_fcs);
+ if (ret)
+ break;
+
+ msg->tx_buf[msg->tx_len - 1] = 0x80 ^ aw_fcs;
+ }
+
+ /* Retry it for 'timeout' before returning an error. */
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(&adapter->dev, "Timeout retrying xfer!\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ usleep_range(interval_us, interval_us * 2);
+
+ interval_us *= 2;
+ if (interval_us > PECI_DEV_RETRY_INTERVAL_MAX_USEC)
+ interval_us = PECI_DEV_RETRY_INTERVAL_MAX_USEC;
+ }
+
+ if (ret)
+ dev_dbg(&adapter->dev, "xfer error: %d\n", ret);
+
+ return ret;
+}
+
+static int peci_xfer(struct peci_adapter *adapter, struct peci_xfer_msg *msg)
+{
+ return __peci_xfer(adapter, msg, false, false);
+}
+
+static int peci_xfer_with_retries(struct peci_adapter *adapter,
+ struct peci_xfer_msg *msg,
+ bool has_aw_fcs)
+{
+ return __peci_xfer(adapter, msg, true, has_aw_fcs);
+}
+
+static int peci_scan_cmd_mask(struct peci_adapter *adapter)
+{
+ struct peci_xfer_msg *msg;
+ u8 revision;
+ int ret;
+ u64 dib;
+
+ /* Update command mask just once */
+ if (adapter->cmd_mask & BIT(PECI_CMD_XFER))
+ return 0;
+
+ msg = peci_get_xfer_msg(PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = PECI_BASE_ADDR;
+ msg->tx_buf[0] = PECI_GET_DIB_CMD;
+
+ ret = peci_xfer(adapter, msg);
+ if (ret) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (msg->rx_buf[0] == PECI_DEV_CC_INVALID_REQ) {
+ /*
+ * If GetDIB() is not supported, fall back to the revision property
+ * of the hardware adapter
+ */
+ revision = adapter->peci_revision;
+ } else {
+ dib = le64_to_cpup((__le64 *)msg->rx_buf);
+
+ /* Check special case for Get DIB command */
+ if (dib == 0) {
+ dev_dbg(&adapter->dev, "DIB read as 0\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Set up the supported commands based on the revision number.
+ * See PECI Spec Table 3-1.
+ */
+ revision = FIELD_GET(REVISION_NUM_MASK, dib);
+ }
+
+ if (revision >= 0x40) { /* Rev. 4.0 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_IA_MSREX);
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_END_PT_CFG);
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_END_PT_CFG);
+ adapter->cmd_mask |= BIT(PECI_CMD_CRASHDUMP_DISC);
+ adapter->cmd_mask |= BIT(PECI_CMD_CRASHDUMP_GET_FRAME);
+ }
+ if (revision >= 0x36) /* Rev. 3.6 */
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_IA_MSR);
+ if (revision >= 0x35) /* Rev. 3.5 */
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_PCI_CFG);
+ if (revision >= 0x34) /* Rev. 3.4 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_PCI_CFG);
+ if (revision >= 0x33) { /* Rev. 3.3 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_PCI_CFG_LOCAL);
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_PCI_CFG_LOCAL);
+ }
+ if (revision >= 0x32) /* Rev. 3.2 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_IA_MSR);
+ if (revision >= 0x31) { /* Rev. 3.1 */
+ adapter->cmd_mask |= BIT(PECI_CMD_RD_PKG_CFG);
+ adapter->cmd_mask |= BIT(PECI_CMD_WR_PKG_CFG);
+ }
+
+ adapter->cmd_mask |= BIT(PECI_CMD_XFER);
+ adapter->cmd_mask |= BIT(PECI_CMD_GET_TEMP);
+ adapter->cmd_mask |= BIT(PECI_CMD_GET_DIB);
+ adapter->cmd_mask |= BIT(PECI_CMD_PING);
+
+out:
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_check_cmd_support(struct peci_adapter *adapter,
+ enum peci_cmd cmd)
+{
+ if (!(adapter->cmd_mask & BIT(PECI_CMD_PING)) &&
+ peci_scan_cmd_mask(adapter) < 0) {
+ dev_dbg(&adapter->dev, "Failed to scan command mask\n");
+ return -EIO;
+ }
+
+ if (!(adapter->cmd_mask & BIT(cmd))) {
+ dev_dbg(&adapter->dev, "Command %d is not supported\n", cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int peci_cmd_xfer(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_xfer_msg *msg = vmsg;
+ u8 aw_fcs;
+ int ret;
+
+ if (!msg->tx_len) {
+ ret = peci_xfer(adapter, msg);
+ } else {
+ switch (msg->tx_buf[0]) {
+ case PECI_GET_DIB_CMD:
+ case PECI_GET_TEMP_CMD:
+ ret = peci_xfer(adapter, msg);
+ break;
+ case PECI_WRPKGCFG_CMD:
+ case PECI_WRIAMSR_CMD:
+ case PECI_WRPCICFG_CMD:
+ case PECI_WRPCICFGLOCAL_CMD:
+ case PECI_WRENDPTCFG_CMD:
+ /*
+ * The sender may not have supplied the AW FCS byte.
+ * Unconditionally add an Assured Write Frame Check
+ * Sequence byte
+ */
+ ret = peci_aw_fcs(msg, 2 + msg->tx_len, &aw_fcs);
+ if (ret)
+ break;
+
+ msg->tx_buf[msg->tx_len - 1] = 0x80 ^ aw_fcs;
+
+ ret = peci_xfer_with_retries(adapter, msg, true);
+ break;
+ default:
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int peci_cmd_ping(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_ping_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(0, 0);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+
+ ret = peci_xfer(adapter, msg);
+
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_get_dib(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_get_dib_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_GET_DIB_CMD;
+
+ ret = peci_xfer(adapter, msg);
+ if (ret)
+ goto out;
+
+ umsg->dib = le64_to_cpup((__le64 *)msg->rx_buf);
+
+out:
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_get_temp(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_get_temp_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ int ret;
+
+ msg = peci_get_xfer_msg(PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_GET_TEMP_CMD;
+
+ ret = peci_xfer(adapter, msg);
+ if (ret)
+ goto out;
+
+ umsg->temp_raw = le16_to_cpup((__le16 *)msg->rx_buf);
+
+out:
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_pkg_cfg(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_rd_pkg_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 domain_id;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ /* Per the PECI spec, the read length must be a byte, word, or dword */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 && umsg->rx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_RDPKGCFG_WRITE_LEN,
+ PECI_RDPKGCFG_READ_LEN_BASE + umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDPKGCFG_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = umsg->index; /* RdPkgConfig index */
+ msg->tx_buf[3] = (u8)umsg->param; /* LSB - Config parameter */
+ msg->tx_buf[4] = (u8)(umsg->param >> 8); /* MSB - Config parameter */
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->pkg_config, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_pkg_cfg(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_wr_pkg_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 aw_fcs, domain_id;
+ int ret, i;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ /* Per the PECI spec, the write length must be a dword */
+ if (umsg->tx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_WRPKGCFG_WRITE_LEN_BASE + umsg->tx_len,
+ PECI_WRPKGCFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRPKGCFG_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = umsg->index; /* WrPkgConfig index */
+ msg->tx_buf[3] = (u8)umsg->param; /* LSB - Config parameter */
+ msg->tx_buf[4] = (u8)(umsg->param >> 8); /* MSB - Config parameter */
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[5 + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
+ ret = peci_aw_fcs(msg, 8 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[5 + i] = 0x80 ^ aw_fcs;
+
+ ret = peci_xfer_with_retries(adapter, msg, true);
+
+out:
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_ia_msr(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_rd_ia_msr_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 domain_id;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ msg = peci_get_xfer_msg(PECI_RDIAMSR_WRITE_LEN, PECI_RDIAMSR_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDIAMSR_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = umsg->thread_id;
+ msg->tx_buf[3] = (u8)umsg->address;
+ msg->tx_buf[4] = (u8)(umsg->address >> 8);
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(&umsg->value, &msg->rx_buf[1], sizeof(uint64_t));
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_ia_msrex(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_rd_ia_msrex_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 domain_id;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ msg = peci_get_xfer_msg(PECI_RDIAMSREX_WRITE_LEN,
+ PECI_RDIAMSREX_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDIAMSREX_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = (u8)umsg->thread_id;
+ msg->tx_buf[3] = (u8)(umsg->thread_id >> 8);
+ msg->tx_buf[4] = (u8)umsg->address;
+ msg->tx_buf[5] = (u8)(umsg->address >> 8);
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(&umsg->value, &msg->rx_buf[1], sizeof(uint64_t));
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_ia_msr(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ return -ENOSYS; /* Not implemented yet */
+}
+
+static int peci_cmd_rd_pci_cfg(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_rd_pci_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 domain_id;
+ u32 address;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ msg = peci_get_xfer_msg(PECI_RDPCICFG_WRITE_LEN,
+ PECI_RDPCICFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->reg; /* [11:0] - Register */
+ address |= (u32)umsg->function << 12; /* [14:12] - Function */
+ address |= (u32)umsg->device << 15; /* [19:15] - Device */
+ address |= (u32)umsg->bus << 20; /* [27:20] - Bus */
+ /* [31:28] - Reserved */
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDPCICFG_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = (u8)address; /* LSB - PCI Config Address */
+ msg->tx_buf[3] = (u8)(address >> 8); /* PCI Config Address */
+ msg->tx_buf[4] = (u8)(address >> 16); /* PCI Config Address */
+ msg->tx_buf[5] = (u8)(address >> 24); /* MSB - PCI Config Address */
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->pci_config, &msg->rx_buf[1], 4);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_pci_cfg(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ return -ENOSYS; /* Not implemented yet */
+}
+
+static int peci_cmd_rd_pci_cfg_local(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_rd_pci_cfg_local_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 domain_id;
+ u32 address;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ /* Per the PECI spec, the read length must be a byte, word, or dword */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 && umsg->rx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_RDPCICFGLOCAL_WRITE_LEN,
+ PECI_RDPCICFGLOCAL_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->reg; /* [11:0] - Register */
+ address |= (u32)umsg->function << 12; /* [14:12] - Function */
+ address |= (u32)umsg->device << 15; /* [19:15] - Device */
+ address |= (u32)umsg->bus << 20; /* [23:20] - Bus */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDPCICFGLOCAL_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = (u8)address; /* LSB - PCI Configuration Address */
+ msg->tx_buf[3] = (u8)(address >> 8); /* PCI Configuration Address */
+ msg->tx_buf[4] = (u8)(address >> 16); /* PCI Configuration Address */
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->pci_config, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_pci_cfg_local(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_wr_pci_cfg_local_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 aw_fcs, domain_id;
+ u32 address;
+ int ret, i;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ /* Per the PECI spec, the write length must be a byte, word, or dword */
+ if (umsg->tx_len != 1 && umsg->tx_len != 2 && umsg->tx_len != 4) {
+ dev_dbg(&adapter->dev, "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_WRPCICFGLOCAL_WRITE_LEN_BASE +
+ umsg->tx_len, PECI_WRPCICFGLOCAL_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->reg; /* [11:0] - Register */
+ address |= (u32)umsg->function << 12; /* [14:12] - Function */
+ address |= (u32)umsg->device << 15; /* [19:15] - Device */
+ address |= (u32)umsg->bus << 20; /* [23:20] - Bus */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRPCICFGLOCAL_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = (u8)address; /* LSB - PCI Configuration Address */
+ msg->tx_buf[3] = (u8)(address >> 8); /* PCI Configuration Address */
+ msg->tx_buf[4] = (u8)(address >> 16); /* PCI Configuration Address */
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[5 + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
+ ret = peci_aw_fcs(msg, 8 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[5 + i] = 0x80 ^ aw_fcs;
+
+ ret = peci_xfer_with_retries(adapter, msg, true);
+
+out:
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_rd_end_pt_cfg(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_rd_end_pt_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg = NULL;
+ u8 tx_size, domain_id;
+ u32 address;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ switch (umsg->msg_type) {
+ case PECI_ENDPTCFG_TYPE_LOCAL_PCI:
+ case PECI_ENDPTCFG_TYPE_PCI:
+ /*
+ * Per the PECI spec, the read length must be a byte, word,
+ * or dword
+ */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 &&
+ umsg->rx_len != 4) {
+ dev_dbg(&adapter->dev,
+ "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_RDENDPTCFG_PCI_WRITE_LEN,
+ PECI_RDENDPTCFG_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.pci_cfg.reg; /* [11:0] - Register */
+ address |= (u32)umsg->params.pci_cfg.function
+ << 12; /* [14:12] - Function */
+ address |= (u32)umsg->params.pci_cfg.device
+ << 15; /* [19:15] - Device */
+ address |= (u32)umsg->params.pci_cfg.bus
+ << 20; /* [27:20] - Bus */
+ /* [31:28] - Reserved */
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDENDPTCFG_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = 0x00; /* Reserved */
+ msg->tx_buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI; /* Addr Type */
+ msg->tx_buf[7] = umsg->params.pci_cfg.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* LSB - PCI Config Address */
+ msg->tx_buf[9] = (u8)(address >> 8); /* PCI Config Address */
+ msg->tx_buf[10] = (u8)(address >> 16); /* PCI Config Address */
+ msg->tx_buf[11] =
+ (u8)(address >> 24); /* MSB - PCI Config Address */
+ break;
+
+ case PECI_ENDPTCFG_TYPE_MMIO:
+ /*
+ * Per the PECI spec, the read length must be a byte, word,
+ * dword, or qword
+ */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 &&
+ umsg->rx_len != 4 && umsg->rx_len != 8) {
+ dev_dbg(&adapter->dev,
+ "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+ /*
+ * Per the PECI spec, the address type must specify either DWORD
+ * or QWORD
+ */
+ if (umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D &&
+ umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ dev_dbg(&adapter->dev,
+ "Invalid address type, addr_type: %d\n",
+ umsg->params.mmio.addr_type);
+ return -EINVAL;
+ }
+
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
+ tx_size = PECI_RDENDPTCFG_MMIO_D_WRITE_LEN;
+ else
+ tx_size = PECI_RDENDPTCFG_MMIO_Q_WRITE_LEN;
+ msg = peci_get_xfer_msg(tx_size,
+ PECI_RDENDPTCFG_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.mmio.function; /* [2:0] - Function */
+ address |= (u32)umsg->params.mmio.device
+ << 3; /* [7:3] - Device */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_RDENDPTCFG_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = umsg->params.mmio.bar; /* BAR # */
+ msg->tx_buf[6] = umsg->params.mmio.addr_type; /* Address Type */
+ msg->tx_buf[7] = umsg->params.mmio.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* Function/Device */
+ msg->tx_buf[9] = umsg->params.mmio.bus; /* PCI Bus */
+ msg->tx_buf[10] = (u8)umsg->params.mmio
+ .offset; /* LSB - Register Offset */
+ msg->tx_buf[11] = (u8)(umsg->params.mmio.offset
+ >> 8); /* Register Offset */
+ msg->tx_buf[12] = (u8)(umsg->params.mmio.offset
+ >> 16); /* Register Offset */
+ msg->tx_buf[13] = (u8)(umsg->params.mmio.offset
+ >> 24); /* MSB - DWORD Register Offset */
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ msg->tx_buf[14] = (u8)(umsg->params.mmio.offset
+ >> 32); /* Register Offset */
+ msg->tx_buf[15] = (u8)(umsg->params.mmio.offset
+ >> 40); /* Register Offset */
+ msg->tx_buf[16] = (u8)(umsg->params.mmio.offset
+ >> 48); /* Register Offset */
+ msg->tx_buf[17] =
+ (u8)(umsg->params.mmio.offset
+ >> 56); /* MSB - QWORD Register Offset */
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->data, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_wr_end_pt_cfg(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_wr_end_pt_cfg_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg = NULL;
+ u8 tx_size, aw_fcs, domain_id;
+ int ret, i, idx;
+ u32 address;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ switch (umsg->msg_type) {
+ case PECI_ENDPTCFG_TYPE_LOCAL_PCI:
+ case PECI_ENDPTCFG_TYPE_PCI:
+ /*
+ * Per the PECI spec, the write length must be a byte, word,
+ * or dword
+ */
+ if (umsg->tx_len != 1 && umsg->tx_len != 2 &&
+ umsg->tx_len != 4) {
+ dev_dbg(&adapter->dev,
+ "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_WRENDPTCFG_PCI_WRITE_LEN_BASE +
+ umsg->tx_len, PECI_WRENDPTCFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.pci_cfg.reg; /* [11:0] - Register */
+ address |= (u32)umsg->params.pci_cfg.function
+ << 12; /* [14:12] - Function */
+ address |= (u32)umsg->params.pci_cfg.device
+ << 15; /* [19:15] - Device */
+ address |= (u32)umsg->params.pci_cfg.bus
+ << 20; /* [27:20] - Bus */
+ /* [31:28] - Reserved */
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRENDPTCFG_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = 0x00; /* Reserved */
+ msg->tx_buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI; /* Addr Type */
+ msg->tx_buf[7] = umsg->params.pci_cfg.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* LSB - PCI Config Address */
+ msg->tx_buf[9] = (u8)(address >> 8); /* PCI Config Address */
+ msg->tx_buf[10] = (u8)(address >> 16); /* PCI Config Address */
+ msg->tx_buf[11] =
+ (u8)(address >> 24); /* MSB - PCI Config Address */
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[12 + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
+ ret = peci_aw_fcs(msg, 15 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[12 + i] = 0x80 ^ aw_fcs;
+ break;
+
+ case PECI_ENDPTCFG_TYPE_MMIO:
+ /*
+ * Per the PECI spec, the write length must be a byte, word,
+ * dword, or qword
+ */
+ if (umsg->tx_len != 1 && umsg->tx_len != 2 &&
+ umsg->tx_len != 4 && umsg->tx_len != 8) {
+ dev_dbg(&adapter->dev,
+ "Invalid write length, tx_len: %d\n",
+ umsg->tx_len);
+ return -EINVAL;
+ }
+ /*
+ * Per the PECI spec, the address type must specify either DWORD
+ * or QWORD
+ */
+ if (umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D &&
+ umsg->params.mmio.addr_type !=
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ dev_dbg(&adapter->dev,
+ "Invalid address type, addr_type: %d\n",
+ umsg->params.mmio.addr_type);
+ return -EINVAL;
+ }
+
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
+ tx_size = PECI_WRENDPTCFG_MMIO_D_WRITE_LEN_BASE +
+ umsg->tx_len;
+ else
+ tx_size = PECI_WRENDPTCFG_MMIO_Q_WRITE_LEN_BASE +
+ umsg->tx_len;
+ msg = peci_get_xfer_msg(tx_size, PECI_WRENDPTCFG_READ_LEN);
+ if (!msg)
+ return -ENOMEM;
+
+ address = umsg->params.mmio.function; /* [2:0] - Function */
+ address |= (u32)umsg->params.mmio.device
+ << 3; /* [7:3] - Device */
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_WRENDPTCFG_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = umsg->msg_type; /* Message Type */
+ msg->tx_buf[3] = 0x00; /* Endpoint ID */
+ msg->tx_buf[4] = 0x00; /* Reserved */
+ msg->tx_buf[5] = umsg->params.mmio.bar; /* BAR # */
+ msg->tx_buf[6] = umsg->params.mmio.addr_type; /* Address Type */
+ msg->tx_buf[7] = umsg->params.mmio.seg; /* PCI Segment */
+ msg->tx_buf[8] = (u8)address; /* Function/Device */
+ msg->tx_buf[9] = umsg->params.mmio.bus; /* PCI Bus */
+ msg->tx_buf[10] = (u8)umsg->params.mmio
+ .offset; /* LSB - Register Offset */
+ msg->tx_buf[11] = (u8)(umsg->params.mmio.offset
+ >> 8); /* Register Offset */
+ msg->tx_buf[12] = (u8)(umsg->params.mmio.offset
+ >> 16); /* Register Offset */
+ msg->tx_buf[13] = (u8)(umsg->params.mmio.offset
+ >> 24); /* MSB - DWORD Register Offset */
+ if (umsg->params.mmio.addr_type ==
+ PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q) {
+ msg->tx_buf[14] = (u8)(umsg->params.mmio.offset
+ >> 32); /* Register Offset */
+ msg->tx_buf[15] = (u8)(umsg->params.mmio.offset
+ >> 40); /* Register Offset */
+ msg->tx_buf[16] = (u8)(umsg->params.mmio.offset
+ >> 48); /* Register Offset */
+ msg->tx_buf[17] =
+ (u8)(umsg->params.mmio.offset
+ >> 56); /* MSB - QWORD Register Offset */
+ idx = 18;
+ } else {
+ idx = 14;
+ }
+ for (i = 0; i < umsg->tx_len; i++)
+ msg->tx_buf[idx + i] = (u8)(umsg->value >> (i << 3));
+
+ /* Add an Assured Write Frame Check Sequence byte */
+ ret = peci_aw_fcs(msg, idx + 3 + umsg->tx_len, &aw_fcs);
+ if (ret)
+ goto out;
+
+ msg->tx_buf[idx + i] = 0x80 ^ aw_fcs;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = peci_xfer_with_retries(adapter, msg, true);
+
+out:
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_crashdump_disc(struct peci_adapter *adapter, uint msg_len, void *vmsg)
+{
+ struct peci_crashdump_disc_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 domain_id;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ /* Per the EDS, the read length must be a byte, word, or qword */
+ if (umsg->rx_len != 1 && umsg->rx_len != 2 && umsg->rx_len != 8) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_CRASHDUMP_DISC_WRITE_LEN,
+ PECI_CRASHDUMP_DISC_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_CRASHDUMP_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = PECI_CRASHDUMP_DISC_VERSION;
+ msg->tx_buf[3] = PECI_CRASHDUMP_DISC_OPCODE;
+ msg->tx_buf[4] = umsg->subopcode;
+ msg->tx_buf[5] = umsg->param0;
+ msg->tx_buf[6] = (u8)umsg->param1;
+ msg->tx_buf[7] = (u8)(umsg->param1 >> 8);
+ msg->tx_buf[8] = umsg->param2;
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->data, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+static int peci_cmd_crashdump_get_frame(struct peci_adapter *adapter, uint msg_len,
+ void *vmsg)
+{
+ struct peci_crashdump_get_frame_msg *umsg = vmsg;
+ struct peci_xfer_msg *msg;
+ u8 domain_id;
+ int ret;
+
+ /*
+ * vmsg may not have a domain ID defined, so we need to check the msg_len.
+ * If the msg_len is the same size as the struct, then domain ID is provided.
+ * Otherwise the domain ID is 0.
+ */
+ domain_id = (msg_len == sizeof(*umsg)) ? umsg->domain_id : 0;
+
+ /* Per the EDS, the read length must be a qword or dqword */
+ if (umsg->rx_len != 8 && umsg->rx_len != 16) {
+ dev_dbg(&adapter->dev, "Invalid read length, rx_len: %d\n",
+ umsg->rx_len);
+ return -EINVAL;
+ }
+
+ msg = peci_get_xfer_msg(PECI_CRASHDUMP_GET_FRAME_WRITE_LEN,
+ PECI_CRASHDUMP_GET_FRAME_READ_LEN_BASE +
+ umsg->rx_len);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->addr = umsg->addr;
+ msg->tx_buf[0] = PECI_CRASHDUMP_CMD;
+ msg->tx_buf[1] = domain_id << 1; /* Domain ID [7:1] | Retry bit [0] */
+ msg->tx_buf[2] = PECI_CRASHDUMP_GET_FRAME_VERSION;
+ msg->tx_buf[3] = PECI_CRASHDUMP_GET_FRAME_OPCODE;
+ msg->tx_buf[4] = (u8)umsg->param0;
+ msg->tx_buf[5] = (u8)(umsg->param0 >> 8);
+ msg->tx_buf[6] = (u8)umsg->param1;
+ msg->tx_buf[7] = (u8)(umsg->param1 >> 8);
+ msg->tx_buf[8] = (u8)umsg->param2;
+ msg->tx_buf[9] = (u8)(umsg->param2 >> 8);
+
+ ret = peci_xfer_with_retries(adapter, msg, false);
+ if (!ret)
+ memcpy(umsg->data, &msg->rx_buf[1], umsg->rx_len);
+
+ umsg->cc = msg->rx_buf[0];
+ peci_put_xfer_msg(msg);
+
+ return ret;
+}
+
+typedef int (*peci_cmd_fn_type)(struct peci_adapter *, uint, void *);
+
+static const peci_cmd_fn_type peci_cmd_fn[PECI_CMD_MAX] = {
+ peci_cmd_xfer,
+ peci_cmd_ping,
+ peci_cmd_get_dib,
+ peci_cmd_get_temp,
+ peci_cmd_rd_pkg_cfg,
+ peci_cmd_wr_pkg_cfg,
+ peci_cmd_rd_ia_msr,
+ peci_cmd_wr_ia_msr,
+ peci_cmd_rd_ia_msrex,
+ peci_cmd_rd_pci_cfg,
+ peci_cmd_wr_pci_cfg,
+ peci_cmd_rd_pci_cfg_local,
+ peci_cmd_wr_pci_cfg_local,
+ peci_cmd_rd_end_pt_cfg,
+ peci_cmd_wr_end_pt_cfg,
+ peci_cmd_crashdump_disc,
+ peci_cmd_crashdump_get_frame,
+};
+
+/**
+ * peci_command - transfer function of a PECI command
+ * @adapter: pointer to peci_adapter
+ * @cmd: PECI command to execute
+ * @msg_len: length in bytes of the message pointed to by @vmsg
+ * @vmsg: pointer to the command-specific PECI message
+ * Context: can sleep
+ *
+ * This performs a transfer of a PECI command using the PECI message
+ * parameter, whose format varies per command.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_command(struct peci_adapter *adapter, enum peci_cmd cmd, uint msg_len, void *vmsg)
+{
+ int ret;
+
+ if (cmd >= PECI_CMD_MAX || cmd < PECI_CMD_XFER)
+ return -ENOTTY;
+
+ dev_dbg(&adapter->dev, "%s, cmd=0x%02x\n", __func__, cmd);
+
+ if (!peci_cmd_fn[cmd])
+ return -EINVAL;
+
+ mutex_lock(&adapter->bus_lock);
+
+ ret = peci_check_cmd_support(adapter, cmd);
+ if (!ret)
+ ret = peci_cmd_fn[cmd](adapter, msg_len, vmsg);
+
+ mutex_unlock(&adapter->bus_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(peci_command);
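+
+/*
+ * A minimal sketch of a peci_command() call, assuming the caller already
+ * holds a reference to the adapter; it issues GetTemp() in the form the
+ * peci_cmd_get_temp() handler above expects:
+ *
+ *   struct peci_get_temp_msg msg = { .addr = PECI_BASE_ADDR };
+ *   int ret;
+ *
+ *   ret = peci_command(adapter, PECI_CMD_GET_TEMP, sizeof(msg), &msg);
+ *   if (!ret)
+ *           ... msg.temp_raw holds the raw GetTemp() reading ...
+ */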
+
+static int peci_detect(struct peci_adapter *adapter, u8 addr)
+{
+ struct peci_ping_msg msg;
+
+ msg.addr = addr;
+
+ return peci_command(adapter, PECI_CMD_PING, sizeof(msg), &msg);
+}
+
+static const struct of_device_id *
+peci_of_match_device(const struct of_device_id *matches,
+ struct peci_client *client)
+{
+#if IS_ENABLED(CONFIG_OF)
+ if (!(client && matches))
+ return NULL;
+
+ return of_match_device(matches, &client->dev);
+#else /* CONFIG_OF */
+ return NULL;
+#endif /* CONFIG_OF */
+}
+
+static const struct peci_device_id *
+peci_match_id(const struct peci_device_id *id, struct peci_client *client)
+{
+ if (!(id && client))
+ return NULL;
+
+ while (id->name[0]) {
+ if (!strncmp(client->name, id->name, PECI_NAME_SIZE))
+ return id;
+ id++;
+ }
+
+ return NULL;
+}
+
+static int peci_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+
+ /* Attempt an OF style match */
+ if (peci_of_match_device(drv->of_match_table, client))
+ return 1;
+
+ driver = to_peci_driver(drv);
+
+ /* Finally an ID match */
+ if (peci_match_id(driver->id_table, client))
+ return 1;
+
+ return 0;
+}
+
+static int peci_device_probe(struct device *dev)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+ int status = -EINVAL;
+
+ if (!client)
+ return 0;
+
+ driver = to_peci_driver(dev->driver);
+
+ if (!driver->id_table &&
+ !peci_of_match_device(dev->driver->of_match_table, client))
+ return -ENODEV;
+
+ dev_dbg(dev, "%s: name:%s\n", __func__, client->name);
+
+ status = dev_pm_domain_attach(&client->dev, true);
+ if (status == -EPROBE_DEFER)
+ return status;
+
+ if (driver->probe)
+ status = driver->probe(client);
+ else
+ status = -EINVAL;
+
+ if (status)
+ goto err_detach_pm_domain;
+
+ return 0;
+
+err_detach_pm_domain:
+ dev_pm_domain_detach(&client->dev, true);
+
+ return status;
+}
+
+static void peci_device_remove(struct device *dev)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+
+ if (!client || !dev->driver)
+ return;
+
+ driver = to_peci_driver(dev->driver);
+ if (driver->remove) {
+ dev_dbg(dev, "%s: name:%s\n", __func__, client->name);
+ driver->remove(client);
+ }
+
+ dev_pm_domain_detach(&client->dev, true);
+}
+
+static void peci_device_shutdown(struct device *dev)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_driver *driver;
+
+ if (!client || !dev->driver)
+ return;
+
+ dev_dbg(dev, "%s: name:%s\n", __func__, client->name);
+
+ driver = to_peci_driver(dev->driver);
+ if (driver->shutdown)
+ driver->shutdown(client);
+}
+
+struct bus_type peci_bus_type = {
+ .name = "peci",
+ .match = peci_device_match,
+ .probe = peci_device_probe,
+ .remove = peci_device_remove,
+ .shutdown = peci_device_shutdown,
+};
+EXPORT_SYMBOL_GPL(peci_bus_type);
+
+static int peci_check_addr_validity(u8 addr)
+{
+ if (addr < PECI_BASE_ADDR || addr > PECI_BASE_ADDR + PECI_OFFSET_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int peci_check_client_busy(struct device *dev, void *client_new_p)
+{
+ struct peci_client *client = peci_verify_client(dev);
+ struct peci_client *client_new = client_new_p;
+
+ if (client && client->addr == client_new->addr)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * peci_get_cpu_id - read CPU ID from the Package Configuration Space of CPU
+ * @adapter: pointer to peci_adapter
+ * @addr: address of the PECI client CPU
+ * @cpu_id: where the CPU ID will be stored
+ * Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_get_cpu_id(struct peci_adapter *adapter, u8 addr, u32 *cpu_id)
+{
+ struct peci_rd_pkg_cfg_msg msg;
+ int ret;
+
+ msg.addr = addr;
+ msg.index = PECI_MBX_INDEX_CPU_ID;
+ msg.param = PECI_PKG_ID_CPU_ID;
+ msg.rx_len = 4;
+
+ ret = peci_command(adapter, PECI_CMD_RD_PKG_CFG, sizeof(msg), &msg);
+ if (msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ *cpu_id = le32_to_cpup((__le32 *)msg.pkg_config);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(peci_get_cpu_id);
+
+static struct peci_client *peci_new_device(struct peci_adapter *adapter,
+ struct peci_board_info const *info)
+{
+ struct peci_client *client;
+ int ret;
+
+ /* Increase reference count for the adapter assigned */
+ if (!peci_get_adapter(adapter->nr))
+ return NULL;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ goto err_put_adapter;
+
+ client->adapter = adapter;
+ client->addr = info->addr;
+ strlcpy(client->name, info->type, sizeof(client->name));
+
+ ret = peci_check_addr_validity(client->addr);
+ if (ret) {
+ dev_err(&adapter->dev, "Invalid PECI CPU address 0x%02hx\n",
+ client->addr);
+ goto err_free_client_silent;
+ }
+
+ /* Check online status of client */
+ ret = peci_detect(adapter, client->addr);
+ if (ret)
+ goto err_free_client;
+
+ ret = device_for_each_child(&adapter->dev, client,
+ peci_check_client_busy);
+ if (ret)
+ goto err_free_client;
+
+ client->dev.parent = &client->adapter->dev;
+ client->dev.bus = &peci_bus_type;
+ client->dev.type = &peci_client_type;
+ client->dev.of_node = of_node_get(info->of_node);
+ dev_set_name(&client->dev, "%d-%02x", adapter->nr, client->addr);
+
+ ret = device_register(&client->dev);
+ if (ret)
+ goto err_put_of_node;
+
+ dev_dbg(&adapter->dev, "client [%s] registered with bus id %s\n",
+ client->name, dev_name(&client->dev));
+
+ return client;
+
+err_put_of_node:
+ of_node_put(info->of_node);
+err_free_client:
+ dev_err(&adapter->dev,
+ "Failed to register peci client %s at 0x%02x (%d)\n",
+ client->name, client->addr, ret);
+err_free_client_silent:
+ kfree(client);
+err_put_adapter:
+ peci_put_adapter(adapter);
+
+ return NULL;
+}
+
+static void peci_unregister_device(struct peci_client *client)
+{
+ if (!client)
+ return;
+
+ if (client->dev.of_node) {
+ of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ of_node_put(client->dev.of_node);
+ }
+
+ device_unregister(&client->dev);
+}
+
+static int peci_unregister_client(struct device *dev, void *dummy)
+{
+ struct peci_client *client = peci_verify_client(dev);
+
+ peci_unregister_device(client);
+
+ return 0;
+}
+
+static void peci_adapter_dev_release(struct device *dev)
+{
+ struct peci_adapter *adapter = to_peci_adapter(dev);
+
+ dev_dbg(dev, "%s: %s\n", __func__, adapter->name);
+ mutex_destroy(&adapter->userspace_clients_lock);
+ mutex_destroy(&adapter->bus_lock);
+ kfree(adapter);
+}
+
+static ssize_t peci_sysfs_new_device(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct peci_adapter *adapter = to_peci_adapter(dev);
+ struct peci_board_info info = {};
+ struct peci_client *client;
+ char *blank, end;
+ short addr;
+ int ret;
+
+ /* Parse device type */
+ blank = strchr(buf, ' ');
+ if (!blank) {
+ dev_err(dev, "%s: Missing parameters\n", "new_device");
+ return -EINVAL;
+ }
+ if (blank - buf > PECI_NAME_SIZE - 1) {
+ dev_err(dev, "%s: Invalid device type\n", "new_device");
+ return -EINVAL;
+ }
+ memcpy(info.type, buf, blank - buf);
+
+ /* Parse remaining parameters, reject extra parameters */
+ ret = sscanf(++blank, "%hi%c", &addr, &end);
+ if (ret < 1) {
+ dev_err(dev, "%s: Can't parse client address\n", "new_device");
+ return -EINVAL;
+ }
+ if (ret > 1 && end != '\n') {
+ dev_err(dev, "%s: Extra parameters\n", "new_device");
+ return -EINVAL;
+ }
+
+ info.addr = (u8)addr;
+ client = peci_new_device(adapter, &info);
+ if (!client)
+ return -EINVAL;
+
+ /* Keep track of the added device */
+ mutex_lock(&adapter->userspace_clients_lock);
+ list_add_tail(&client->detected, &adapter->userspace_clients);
+ mutex_unlock(&adapter->userspace_clients_lock);
+ dev_dbg(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
+ info.type, info.addr);
+
+ return count;
+}
+static DEVICE_ATTR(new_device, 0200, NULL, peci_sysfs_new_device);
+
+static ssize_t peci_sysfs_delete_device(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct peci_adapter *adapter = to_peci_adapter(dev);
+ struct peci_client *client, *next;
+ struct peci_board_info info = {};
+ char *blank, end;
+ short addr;
+ int ret;
+
+ /* Parse device type */
+ blank = strchr(buf, ' ');
+ if (!blank) {
+ dev_err(dev, "%s: Missing parameters\n", "delete_device");
+ return -EINVAL;
+ }
+ if (blank - buf > PECI_NAME_SIZE - 1) {
+ dev_err(dev, "%s: Invalid device type\n", "delete_device");
+ return -EINVAL;
+ }
+ memcpy(info.type, buf, blank - buf);
+
+ /* Parse remaining parameters, reject extra parameters */
+ ret = sscanf(++blank, "%hi%c", &addr, &end);
+ if (ret < 1) {
+ dev_err(dev, "%s: Can't parse client address\n",
+ "delete_device");
+ return -EINVAL;
+ }
+ if (ret > 1 && end != '\n') {
+ dev_err(dev, "%s: Extra parameters\n", "delete_device");
+ return -EINVAL;
+ }
+
+ info.addr = (u8)addr;
+
+ /* Make sure the device was added through sysfs */
+ ret = -ENOENT;
+ mutex_lock(&adapter->userspace_clients_lock);
+ list_for_each_entry_safe(client, next, &adapter->userspace_clients,
+ detected) {
+ if (client->addr == info.addr &&
+ !strncmp(client->name, info.type, PECI_NAME_SIZE)) {
+ dev_dbg(dev, "%s: Deleting device %s at 0x%02hx\n",
+ "delete_device", client->name, client->addr);
+ list_del(&client->detected);
+ peci_unregister_device(client);
+ ret = count;
+ break;
+ }
+ }
+ mutex_unlock(&adapter->userspace_clients_lock);
+
+ if (ret < 0)
+ dev_dbg(dev, "%s: Can't find device in list\n",
+ "delete_device");
+
+ return ret;
+}
+static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, 0200, NULL,
+ peci_sysfs_delete_device);
+
+static struct attribute *peci_adapter_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_new_device.attr,
+ &dev_attr_delete_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(peci_adapter);
+
+struct device_type peci_adapter_type = {
+ .groups = peci_adapter_groups,
+ .release = peci_adapter_dev_release,
+};
+EXPORT_SYMBOL_GPL(peci_adapter_type);
+
+/**
+ * peci_verify_adapter - return parameter as peci_adapter, or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * Return: pointer to peci_adapter on success, else NULL.
+ */
+struct peci_adapter *peci_verify_adapter(struct device *dev)
+{
+ return (dev->type == &peci_adapter_type)
+ ? to_peci_adapter(dev)
+ : NULL;
+}
+EXPORT_SYMBOL_GPL(peci_verify_adapter);
+
+#if IS_ENABLED(CONFIG_OF)
+static struct peci_client *peci_of_register_device(struct peci_adapter *adapter,
+ struct device_node *node)
+{
+ struct peci_board_info info = {};
+ struct peci_client *client;
+ u32 addr;
+ int ret;
+
+ dev_dbg(&adapter->dev, "register %pOF\n", node);
+
+ ret = of_property_read_u32(node, "reg", &addr);
+ if (ret) {
+ dev_err(&adapter->dev, "invalid reg on %pOF\n", node);
+ return ERR_PTR(ret);
+ }
+
+ info.addr = addr;
+ info.of_node = node;
+
+ client = peci_new_device(adapter, &info);
+ if (!client)
+ client = ERR_PTR(-EINVAL);
+
+ return client;
+}
+
+static void peci_of_register_devices(struct peci_adapter *adapter)
+{
+ struct device_node *bus, *node;
+ struct peci_client *client;
+
+ /* Only register child devices if the adapter has a node pointer set */
+ if (!adapter->dev.of_node)
+ return;
+
+ bus = of_get_child_by_name(adapter->dev.of_node, "peci-bus");
+ if (!bus)
+ bus = of_node_get(adapter->dev.of_node);
+
+ for_each_available_child_of_node(bus, node) {
+ if (of_node_test_and_set_flag(node, OF_POPULATED))
+ continue;
+
+ client = peci_of_register_device(adapter, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adapter->dev,
+ "Failed to create PECI device for %pOF\n",
+ node);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
+ }
+
+ of_node_put(bus);
+}
+#else /* CONFIG_OF */
+static void peci_of_register_devices(struct peci_adapter *adapter) { }
+#endif /* CONFIG_OF */
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int peci_of_match_node(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/* must call put_device() when done with returned peci_client device */
+static struct peci_client *peci_of_find_device(struct device_node *node)
+{
+ struct peci_client *client;
+ struct device *dev;
+
+ dev = bus_find_device(&peci_bus_type, NULL, node, peci_of_match_node);
+ if (!dev)
+ return NULL;
+
+ client = peci_verify_client(dev);
+ if (!client)
+ put_device(dev);
+
+ return client;
+}
+
+/* must call put_device() when done with returned peci_adapter device */
+static struct peci_adapter *peci_of_find_adapter(struct device_node *node)
+{
+ struct peci_adapter *adapter;
+ struct device *dev;
+
+ dev = bus_find_device(&peci_bus_type, NULL, node, peci_of_match_node);
+ if (!dev)
+ return NULL;
+
+ adapter = peci_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
+
+ return adapter;
+}
+
+static int peci_of_notify(struct notifier_block *nb, ulong action, void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct peci_adapter *adapter;
+ struct peci_client *client;
+
+ switch (of_reconfig_get_state_change(action, rd)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ adapter = peci_of_find_adapter(rd->dn->parent);
+ if (!adapter)
+ return NOTIFY_OK; /* not for us */
+
+ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+ put_device(&adapter->dev);
+ return NOTIFY_OK;
+ }
+
+ client = peci_of_register_device(adapter, rd->dn);
+ put_device(&adapter->dev);
+
+ if (IS_ERR(client)) {
+ dev_err(&adapter->dev,
+ "failed to create client for '%pOF'\n", rd->dn);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
+ return notifier_from_errno(PTR_ERR(client));
+ }
+ break;
+ case OF_RECONFIG_CHANGE_REMOVE:
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
+ /* find our device by node */
+ client = peci_of_find_device(rd->dn);
+ if (!client)
+ return NOTIFY_OK; /* no? not meant for us */
+
+ /* unregister takes one ref away */
+ peci_unregister_device(client);
+
+ /* and put the reference of the find */
+ put_device(&client->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block peci_of_notifier = {
+ .notifier_call = peci_of_notify,
+};
+#else /* CONFIG_OF_DYNAMIC */
+extern struct notifier_block peci_of_notifier;
+#endif /* CONFIG_OF_DYNAMIC */
+
+/**
+ * peci_alloc_adapter - allocate a PECI adapter
+ * @dev: the parent device of the adapter, possibly on the platform bus
+ * @size: how much zeroed driver-private data to allocate; the pointer to this
+ * memory is in the driver_data field of the returned device,
+ * accessible with peci_get_adapdata().
+ * Context: can sleep
+ *
+ * This call is used only by PECI adapter drivers, which are the only ones
+ * directly touching chip registers. It's how they allocate a peci_adapter
+ * structure, prior to calling peci_add_adapter().
+ *
+ * This must be called from context that can sleep.
+ *
+ * The caller is responsible for initializing the adapter's methods before
+ * calling peci_add_adapter(); and (after errors while adding the device)
+ * calling put_device() to prevent a memory leak.
+ *
+ * Return: the peci_adapter structure on success, else NULL.
+ */
+struct peci_adapter *peci_alloc_adapter(struct device *dev, uint size)
+{
+ struct peci_adapter *adapter;
+
+ if (!dev)
+ return NULL;
+
+ adapter = kzalloc(size + sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ device_initialize(&adapter->dev);
+ adapter->dev.parent = dev;
+ adapter->dev.bus = &peci_bus_type;
+ adapter->dev.type = &peci_adapter_type;
+ peci_set_adapdata(adapter, &adapter[1]);
+
+ return adapter;
+}
+EXPORT_SYMBOL_GPL(peci_alloc_adapter);
+
+static int peci_register_adapter(struct peci_adapter *adapter)
+{
+ int ret = -EINVAL;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!is_registered))
+ goto err_free_idr;
+
+ if (WARN(!adapter->name[0], "peci adapter has no name"))
+ goto err_free_idr;
+
+ if (WARN(!adapter->xfer, "peci adapter has no xfer function\n"))
+ goto err_free_idr;
+
+ mutex_init(&adapter->bus_lock);
+ mutex_init(&adapter->userspace_clients_lock);
+ INIT_LIST_HEAD(&adapter->userspace_clients);
+
+ dev_set_name(&adapter->dev, "peci-%d", adapter->nr);
+
+ ret = device_add(&adapter->dev);
+ if (ret) {
+ pr_err("adapter '%s': can't add device (%d)\n",
+ adapter->name, ret);
+ goto err_free_idr;
+ }
+
+ dev_dbg(&adapter->dev, "adapter [%s] registered\n", adapter->name);
+
+ pm_runtime_no_callbacks(&adapter->dev);
+ pm_suspend_ignore_children(&adapter->dev, true);
+ pm_runtime_enable(&adapter->dev);
+
+ /* create pre-declared device nodes */
+ peci_of_register_devices(adapter);
+
+ return 0;
+
+err_free_idr:
+ mutex_lock(&core_lock);
+ idr_remove(&peci_adapter_idr, adapter->nr);
+ mutex_unlock(&core_lock);
+ return ret;
+}
+
+static int peci_add_numbered_adapter(struct peci_adapter *adapter)
+{
+ int id;
+
+ mutex_lock(&core_lock);
+ id = idr_alloc(&peci_adapter_idr, adapter,
+ adapter->nr, adapter->nr + 1, GFP_KERNEL);
+ mutex_unlock(&core_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+
+ return peci_register_adapter(adapter);
+}
+
+/**
+ * peci_add_adapter - add a PECI adapter
+ * @adapter: initialized adapter, originally from peci_alloc_adapter()
+ * Context: can sleep
+ *
+ * PECI adapters connect to their drivers using some non-PECI bus,
+ * such as the platform bus. The final stage of probe() in that code
+ * includes calling peci_add_adapter() to hook up to this PECI bus glue.
+ *
+ * This must be called from context that can sleep.
+ *
+ * It returns zero on success, else a negative error code (dropping the
+ * adapter's refcount). After a successful return, the caller is responsible
+ * for calling peci_del_adapter().
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_add_adapter(struct peci_adapter *adapter)
+{
+ struct device *dev = &adapter->dev;
+ int id;
+
+ id = of_alias_get_id(dev->of_node, "peci");
+ if (id >= 0) {
+ adapter->nr = id;
+ return peci_add_numbered_adapter(adapter);
+ }
+
+ mutex_lock(&core_lock);
+ id = idr_alloc(&peci_adapter_idr, adapter, 0, 0, GFP_KERNEL);
+ mutex_unlock(&core_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id;
+
+ adapter->nr = id;
+
+ return peci_register_adapter(adapter);
+}
+EXPORT_SYMBOL_GPL(peci_add_adapter);
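+
+/*
+ * A minimal adapter registration sketch for a controller driver's probe();
+ * example_xfer and example_priv are placeholder names:
+ *
+ *   struct peci_adapter *adapter;
+ *   int ret;
+ *
+ *   adapter = peci_alloc_adapter(dev, sizeof(struct example_priv));
+ *   if (!adapter)
+ *           return -ENOMEM;
+ *
+ *   adapter->owner = THIS_MODULE;
+ *   adapter->dev.of_node = of_node_get(dev->of_node);
+ *   strlcpy(adapter->name, "example-peci", sizeof(adapter->name));
+ *   adapter->xfer = example_xfer;
+ *
+ *   ret = peci_add_adapter(adapter);
+ *   if (ret)
+ *           put_device(&adapter->dev);
+ */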
+
+/**
+ * peci_del_adapter - delete a PECI adapter
+ * @adapter: the adapter being deleted
+ * Context: can sleep
+ *
+ * This call is used only by PECI adapter drivers, which are the only ones
+ * directly touching chip registers.
+ *
+ * This must be called from context that can sleep.
+ *
+ * Note that this function also drops a reference to the adapter.
+ */
+void peci_del_adapter(struct peci_adapter *adapter)
+{
+ struct peci_client *client, *next;
+ struct peci_adapter *found;
+ int nr;
+
+ /* First make sure that this adapter was ever added */
+ mutex_lock(&core_lock);
+ found = idr_find(&peci_adapter_idr, adapter->nr);
+ mutex_unlock(&core_lock);
+
+ if (found != adapter)
+ return;
+
+ /* Remove devices instantiated from sysfs */
+ mutex_lock(&adapter->userspace_clients_lock);
+ list_for_each_entry_safe(client, next, &adapter->userspace_clients,
+ detected) {
+ dev_dbg(&adapter->dev, "Removing %s at 0x%x\n", client->name,
+ client->addr);
+ list_del(&client->detected);
+ peci_unregister_device(client);
+ }
+ mutex_unlock(&adapter->userspace_clients_lock);
+
+ /*
+ * Detach any active clients. This can't fail, thus we do not
+ * check the returned value.
+ */
+ device_for_each_child(&adapter->dev, NULL, peci_unregister_client);
+
+ /* device name is gone after device_unregister */
+ dev_dbg(&adapter->dev, "adapter [%s] unregistered\n", adapter->name);
+
+ pm_runtime_disable(&adapter->dev);
+ nr = adapter->nr;
+ device_unregister(&adapter->dev);
+
+ /* free bus id */
+ mutex_lock(&core_lock);
+ idr_remove(&peci_adapter_idr, nr);
+ mutex_unlock(&core_lock);
+}
+EXPORT_SYMBOL_GPL(peci_del_adapter);
+
+int peci_for_each_dev(void *data, int (*fn)(struct device *, void *))
+{
+ int ret;
+
+ mutex_lock(&core_lock);
+ ret = bus_for_each_dev(&peci_bus_type, NULL, data, fn);
+ mutex_unlock(&core_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(peci_for_each_dev);
+
+/**
+ * peci_register_driver - register a PECI driver
+ * @owner: owner module of the driver being registered
+ * @driver: the driver being registered
+ * Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int peci_register_driver(struct module *owner, struct peci_driver *driver)
+{
+ int ret;
+
+ /* Can't register until after driver model init */
+ if (WARN_ON(!is_registered))
+ return -EAGAIN;
+
+ /* add the driver to the list of peci drivers in the driver core */
+ driver->driver.owner = owner;
+ driver->driver.bus = &peci_bus_type;
+
+ /*
+ * When registration returns, the driver core
+ * will have called probe() for all matching-but-unbound devices.
+ */
+ ret = driver_register(&driver->driver);
+ if (ret)
+ return ret;
+
+ pr_debug("driver [%s] registered\n", driver->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(peci_register_driver);
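+
+/*
+ * A minimal client driver sketch; every "example" identifier is a
+ * placeholder, not an API defined by this patch:
+ *
+ *   static const struct peci_device_id example_id_table[] = {
+ *           { .name = "example" },
+ *           { }
+ *   };
+ *
+ *   static struct peci_driver example_driver = {
+ *           .probe    = example_probe,
+ *           .remove   = example_remove,
+ *           .id_table = example_id_table,
+ *           .driver   = { .name = "example" },
+ *   };
+ *
+ *   ret = peci_register_driver(THIS_MODULE, &example_driver);
+ *
+ * peci_del_driver() below undoes the registration on module exit.
+ */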
+
+/**
+ * peci_del_driver - unregister a PECI driver
+ * @driver: the driver being unregistered
+ * Context: can sleep
+ */
+void peci_del_driver(struct peci_driver *driver)
+{
+ driver_unregister(&driver->driver);
+ pr_debug("driver [%s] unregistered\n", driver->driver.name);
+}
+EXPORT_SYMBOL_GPL(peci_del_driver);
+
+static int __init peci_init(void)
+{
+ int ret;
+
+ ret = bus_register(&peci_bus_type);
+ if (ret < 0) {
+ pr_err("peci: Failed to register PECI bus type!\n");
+ return ret;
+ }
+
+ crc8_populate_msb(peci_crc8_table, PECI_CRC8_POLYNOMIAL);
+
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_register(&peci_of_notifier));
+
+ is_registered = true;
+
+ return 0;
+}
+
+static void __exit peci_exit(void)
+{
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_unregister(&peci_of_notifier));
+
+ bus_unregister(&peci_bus_type);
+}
+
+subsys_initcall(peci_init);
+module_exit(peci_exit);
+
+MODULE_AUTHOR("Jason M Biils <jason.m.bills@linux.intel.com>");
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI bus core module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/peci/peci-dev.c b/drivers/peci/peci-dev.c
new file mode 100644
index 000000000000..f02666984ce7
--- /dev/null
+++ b/drivers/peci/peci-dev.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 Intel Corporation
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/peci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/*
+ * A peci_dev represents a peci_adapter ... a PECI master, not a
+ * slave (peci_client) with which messages will be exchanged. It's coupled
+ * with a character special file which is accessed by user mode drivers.
+ *
+ * The list of peci_dev structures is parallel to the peci_adapter lists
+ * maintained by the driver model, and is updated using bus notifications.
+ */
+struct peci_dev {
+ struct list_head list;
+ struct peci_adapter *adapter;
+ struct device *dev;
+ struct cdev cdev;
+};
+
+#define PECI_MINORS MINORMASK
+
+static dev_t peci_devt;
+static LIST_HEAD(peci_dev_list);
+static DEFINE_SPINLOCK(peci_dev_list_lock);
+
+static struct peci_dev *peci_dev_get_by_minor(uint index)
+{
+ struct peci_dev *peci_dev;
+
+ spin_lock(&peci_dev_list_lock);
+ list_for_each_entry(peci_dev, &peci_dev_list, list) {
+ if (peci_dev->adapter->nr == index)
+ goto found;
+ }
+ peci_dev = NULL;
+found:
+ spin_unlock(&peci_dev_list_lock);
+
+ return peci_dev;
+}
+
+static struct peci_dev *peci_dev_alloc(struct peci_adapter *adapter)
+{
+ struct peci_dev *peci_dev;
+
+ if (adapter->nr >= PECI_MINORS) {
+ dev_err(&adapter->dev, "Out of device minors (%d)\n",
+ adapter->nr);
+ return ERR_PTR(-ENODEV);
+ }
+
+ peci_dev = kzalloc(sizeof(*peci_dev), GFP_KERNEL);
+ if (!peci_dev)
+ return ERR_PTR(-ENOMEM);
+ peci_dev->adapter = adapter;
+
+ spin_lock(&peci_dev_list_lock);
+ list_add_tail(&peci_dev->list, &peci_dev_list);
+ spin_unlock(&peci_dev_list_lock);
+
+ return peci_dev;
+}
+
+static void peci_dev_put(struct peci_dev *peci_dev)
+{
+ spin_lock(&peci_dev_list_lock);
+ list_del(&peci_dev->list);
+ spin_unlock(&peci_dev_list_lock);
+ kfree(peci_dev);
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct peci_dev *peci_dev = peci_dev_get_by_minor(MINOR(dev->devt));
+
+ if (!peci_dev)
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", peci_dev->adapter->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *peci_dev_attrs[] = {
+ &dev_attr_name.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(peci_dev);
+
+static long peci_dev_ioctl(struct file *file, uint iocmd, ulong arg)
+{
+ struct peci_dev *peci_dev = file->private_data;
+ void __user *umsg = (void __user *)arg;
+ struct peci_xfer_msg *xmsg = NULL;
+ struct peci_xfer_msg uxmsg;
+ enum peci_cmd cmd;
+ u8 *msg = NULL;
+ uint msg_len;
+ int ret;
+
+ cmd = _IOC_NR(iocmd);
+ msg_len = _IOC_SIZE(iocmd);
+
+ switch (cmd) {
+ case PECI_CMD_XFER:
+ if (msg_len != sizeof(struct peci_xfer_msg)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&uxmsg, umsg, msg_len)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ xmsg = peci_get_xfer_msg(uxmsg.tx_len, uxmsg.rx_len);
+ if (!xmsg) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (uxmsg.tx_len &&
+ copy_from_user(xmsg->tx_buf, (__u8 __user *)uxmsg.tx_buf,
+ uxmsg.tx_len)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ xmsg->addr = uxmsg.addr;
+ xmsg->tx_len = uxmsg.tx_len;
+ xmsg->rx_len = uxmsg.rx_len;
+
+ /*
+ * Send the command and copy the results back to user space on
+ * either success or timeout to provide the completion code to
+ * the caller.
+ */
+ ret = peci_command(peci_dev->adapter, cmd, msg_len, xmsg);
+ if ((!ret || ret == -ETIMEDOUT) && xmsg->rx_len &&
+ copy_to_user((__u8 __user *)uxmsg.rx_buf, xmsg->rx_buf,
+ xmsg->rx_len))
+ ret = -EFAULT;
+
+ break;
+
+ default:
+ msg = memdup_user(umsg, msg_len);
+ if (IS_ERR(msg)) {
+ ret = PTR_ERR(msg);
+ break;
+ }
+
+ /*
+ * Send the command and copy the results back to user space on
+ * either success or timeout to provide the completion code to
+ * the caller.
+ */
+ ret = peci_command(peci_dev->adapter, cmd, msg_len, msg);
+ if ((!ret || ret == -ETIMEDOUT) &&
+ copy_to_user(umsg, msg, msg_len))
+ ret = -EFAULT;
+
+ break;
+ }
+
+ peci_put_xfer_msg(xmsg);
+ if (!IS_ERR(msg))
+ kfree(msg);
+
+ return (long)ret;
+}
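+
+/*
+ * Note on the ioctl encoding (the authoritative macros live in the PECI
+ * uapi header, not in this file): the PECI command index is carried in
+ * _IOC_NR(iocmd) and the size of the argument structure in _IOC_SIZE(iocmd).
+ * A hypothetical transfer through /dev/peci-N from userspace therefore
+ * looks roughly like:
+ *
+ *	struct peci_xfer_msg msg = { .addr = 0x30, .tx_len = ..., .rx_len = ... };
+ *	ioctl(fd, <PECI xfer ioctl from the uapi header>, &msg);
+ */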
+
+static int peci_dev_open(struct inode *inode, struct file *file)
+{
+ struct peci_adapter *adapter;
+ struct peci_dev *peci_dev;
+
+ peci_dev = peci_dev_get_by_minor(iminor(inode));
+ if (!peci_dev)
+ return -ENODEV;
+
+ adapter = peci_get_adapter(peci_dev->adapter->nr);
+ if (!adapter)
+ return -ENODEV;
+
+ file->private_data = peci_dev;
+
+ return 0;
+}
+
+static int peci_dev_release(struct inode *inode, struct file *file)
+{
+ struct peci_dev *peci_dev = file->private_data;
+
+ peci_put_adapter(peci_dev->adapter);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations peci_dev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = peci_dev_ioctl,
+ .open = peci_dev_open,
+ .release = peci_dev_release,
+ .llseek = no_llseek,
+};
+
+static struct class *peci_dev_class;
+
+static int peci_dev_attach_adapter(struct device *dev, void *dummy)
+{
+ struct peci_adapter *adapter;
+ struct peci_dev *peci_dev;
+ dev_t devt;
+ int ret;
+
+ if (dev->type != &peci_adapter_type)
+ return 0;
+
+ adapter = to_peci_adapter(dev);
+ peci_dev = peci_dev_alloc(adapter);
+ if (IS_ERR(peci_dev))
+ return PTR_ERR(peci_dev);
+
+ cdev_init(&peci_dev->cdev, &peci_dev_fops);
+ peci_dev->cdev.owner = THIS_MODULE;
+ devt = MKDEV(MAJOR(peci_devt), adapter->nr);
+
+ ret = cdev_add(&peci_dev->cdev, devt, 1);
+ if (ret)
+ goto err_put_dev;
+
+ /* register this peci device with the driver core */
+ peci_dev->dev = device_create(peci_dev_class, &adapter->dev, devt, NULL,
+ "peci-%d", adapter->nr);
+ if (IS_ERR(peci_dev->dev)) {
+ ret = PTR_ERR(peci_dev->dev);
+ goto err_del_cdev;
+ }
+
+ dev_info(dev, "cdev of adapter [%s] registered as minor %d\n",
+ adapter->name, adapter->nr);
+
+ return 0;
+
+err_del_cdev:
+ cdev_del(&peci_dev->cdev);
+err_put_dev:
+ peci_dev_put(peci_dev);
+
+ return ret;
+}
+
+static int peci_dev_detach_adapter(struct device *dev, void *dummy)
+{
+ struct peci_adapter *adapter;
+ struct peci_dev *peci_dev;
+ dev_t devt;
+
+ if (dev->type != &peci_adapter_type)
+ return 0;
+
+ adapter = to_peci_adapter(dev);
+ peci_dev = peci_dev_get_by_minor(adapter->nr);
+ if (!peci_dev)
+ return 0;
+
+ cdev_del(&peci_dev->cdev);
+ devt = peci_dev->dev->devt;
+ peci_dev_put(peci_dev);
+ device_destroy(peci_dev_class, devt);
+
+ dev_info(dev, "cdev of adapter [%s] unregistered\n", adapter->name);
+
+ return 0;
+}
+
+static int peci_dev_notifier_call(struct notifier_block *nb, ulong action,
+ void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ return peci_dev_attach_adapter(dev, NULL);
+ case BUS_NOTIFY_DEL_DEVICE:
+ return peci_dev_detach_adapter(dev, NULL);
+ }
+
+ return 0;
+}
+
+static struct notifier_block peci_dev_notifier = {
+ .notifier_call = peci_dev_notifier_call,
+};
+
+static int __init peci_dev_init(void)
+{
+ int ret;
+
+ pr_debug("peci /dev entries driver\n");
+
+ ret = alloc_chrdev_region(&peci_devt, 0, PECI_MINORS, "peci");
+ if (ret < 0) {
+ pr_err("peci: Failed to allocate chr dev region!\n");
+ goto err;
+ }
+
+ peci_dev_class = class_create(THIS_MODULE, KBUILD_MODNAME);
+ if (IS_ERR(peci_dev_class)) {
+ ret = PTR_ERR(peci_dev_class);
+ goto err_unreg_chrdev;
+ }
+ peci_dev_class->dev_groups = peci_dev_groups;
+
+ /* Keep track of adapters which will be added or removed later */
+ ret = bus_register_notifier(&peci_bus_type, &peci_dev_notifier);
+ if (ret)
+ goto err_destroy_class;
+
+ /* Bind to already existing adapters right away */
+ peci_for_each_dev(NULL, peci_dev_attach_adapter);
+
+ return 0;
+
+err_destroy_class:
+ class_destroy(peci_dev_class);
+err_unreg_chrdev:
+ unregister_chrdev_region(peci_devt, PECI_MINORS);
+err:
+ pr_err("%s: Driver Initialization failed\n", __FILE__);
+
+ return ret;
+}
+
+static void __exit peci_dev_exit(void)
+{
+ bus_unregister_notifier(&peci_bus_type, &peci_dev_notifier);
+ peci_for_each_dev(NULL, peci_dev_detach_adapter);
+ class_destroy(peci_dev_class);
+ unregister_chrdev_region(peci_devt, PECI_MINORS);
+}
+
+module_init(peci_dev_init);
+module_exit(peci_dev_exit);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("PECI /dev entries driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index 4c0d26606b6c..69a05a03dd63 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -279,7 +279,7 @@ FUNC_GROUP_DECL(SD2, F19, E21, F20, D20, D21, E20, G18, C21);
#define B20 32
SIG_EXPR_LIST_DECL_SINGLE(B20, NCTS3, NCTS3, SIG_DESC_SET(SCU80, 16));
-SIG_EXPR_DECL_SINGLE(GPIE0IN, GPIE0, GPIE0_DESC);
+SIG_EXPR_DECL_SINGLE(GPIE0IN, GPIE0);
SIG_EXPR_DECL_SINGLE(GPIE0IN, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(B20, GPIE0IN, GPIE0, GPIE);
PIN_DECL_2(B20, GPIOE0, NCTS3, GPIE0IN);
@@ -299,7 +299,7 @@ FUNC_GROUP_DECL(GPIE0, B20, C20);
#define F18 34
SIG_EXPR_LIST_DECL_SINGLE(F18, NDSR3, NDSR3, SIG_DESC_SET(SCU80, 18));
-SIG_EXPR_DECL_SINGLE(GPIE2IN, GPIE2, GPIE2_DESC);
+SIG_EXPR_DECL_SINGLE(GPIE2IN, GPIE2);
SIG_EXPR_DECL_SINGLE(GPIE2IN, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(F18, GPIE2IN, GPIE2, GPIE);
PIN_DECL_2(F18, GPIOE2, NDSR3, GPIE2IN);
@@ -1412,7 +1412,7 @@ FUNC_GROUP_DECL(ADC15, H4);
#define R22 192
SIG_EXPR_DECL_SINGLE(SIOS3, SIOS3, SIG_DESC_SET(SCUA4, 8));
-SIG_EXPR_DECL_SINGLE(SIOS3, ACPI, ACPI_DESC);
+SIG_EXPR_DECL_SINGLE(SIOS3, ACPI);
SIG_EXPR_LIST_DECL_DUAL(R22, SIOS3, SIOS3, ACPI);
SIG_EXPR_LIST_DECL_SINGLE(R22, DASHR22, DASHR22, SIG_DESC_SET(SCU94, 10));
PIN_DECL_2(R22, GPIOY0, SIOS3, DASHR22);
@@ -1420,7 +1420,7 @@ FUNC_GROUP_DECL(SIOS3, R22);
#define R21 193
SIG_EXPR_DECL_SINGLE(SIOS5, SIOS5, SIG_DESC_SET(SCUA4, 9));
-SIG_EXPR_DECL_SINGLE(SIOS5, ACPI, ACPI_DESC);
+SIG_EXPR_DECL_SINGLE(SIOS5, ACPI);
SIG_EXPR_LIST_DECL_DUAL(R21, SIOS5, SIOS5, ACPI);
SIG_EXPR_LIST_DECL_SINGLE(R21, DASHR21, DASHR21, SIG_DESC_SET(SCU94, 10));
PIN_DECL_2(R21, GPIOY1, SIOS5, DASHR21);
@@ -1436,7 +1436,7 @@ FUNC_GROUP_DECL(SIOPWREQ, P22);
#define P21 195
SIG_EXPR_DECL_SINGLE(SIOONCTRL, SIOONCTRL, SIG_DESC_SET(SCUA4, 11));
-SIG_EXPR_DECL_SINGLE(SIOONCTRL, ACPI, ACPI_DESC);
+SIG_EXPR_DECL_SINGLE(SIOONCTRL, ACPI);
SIG_EXPR_LIST_DECL_DUAL(P21, SIOONCTRL, SIOONCTRL, ACPI);
SIG_EXPR_LIST_DECL_SINGLE(P21, DASHP21, DASHP21, SIG_DESC_SET(SCU94, 11));
PIN_DECL_2(P21, GPIOY3, SIOONCTRL, DASHP21);
@@ -2785,6 +2785,22 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+#define GPIOE1 33
+#define GPIOE3 35
+static void aspeed_g5_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ /*
+ * If we're freeing GPIOE1 (33) or GPIOE3 (35) then re-enable the
+ * pass-through mux setting; otherwise, do nothing.
+ */
+ if (offset != GPIOE1 && offset != GPIOE3)
+ return;
+
+ aspeed_gpio_disable_free(pctldev, range, offset);
+}
+
static const struct aspeed_pin_config_map aspeed_g5_pin_config_map[] = {
{ PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
{ PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
@@ -2820,6 +2836,7 @@ static const struct pinmux_ops aspeed_g5_pinmux_ops = {
.get_function_groups = aspeed_pinmux_get_fn_groups,
.set_mux = aspeed_pinmux_set_mux,
.gpio_request_enable = aspeed_gpio_request_enable,
+ .gpio_disable_free = aspeed_g5_gpio_disable_free,
.strict = true,
};
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 80838dc54b3a..f77c7139d7d6 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -355,7 +355,7 @@ FUNC_GROUP_DECL(NRTS4, B24);
FUNC_GROUP_DECL(RGMII4, F24, E23, E24, E25, D26, D24, C25, C26, C24, B26, B25,
B24);
-FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
+FUNC_GROUP_DECL(RMII4, E23, E24, E25, C25, C24, B26, B25, B24);
#define D22 40
SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
@@ -776,7 +776,7 @@ SSSF_PIN_DECL(AC23, GPIOO7, PWM7, SIG_DESC_SET(SCU41C, 23));
#define AB22 120
SIG_EXPR_LIST_DECL_SEMG(AB22, PWM8, PWM8G1, PWM8, SIG_DESC_SET(SCU41C, 24));
-SIG_EXPR_LIST_DECL_SESG(AB22, THRUIN0, THRU0, SIG_DESC_SET(SCU4BC, 24));
+SIG_EXPR_LIST_DECL_SESG(AB22, THRUIN0, THRU0);
PIN_DECL_2(AB22, GPIOP0, PWM8, THRUIN0);
GROUP_DECL(PWM8G1, AB22);
FUNC_DECL_2(PWM8, PWM8G0, PWM8G1);
@@ -793,7 +793,7 @@ FUNC_DECL_2(PWM9, PWM9G0, PWM9G1);
#define AA23 122
SIG_EXPR_LIST_DECL_SEMG(AA23, PWM10, PWM10G1, PWM10, SIG_DESC_SET(SCU41C, 26));
-SIG_EXPR_LIST_DECL_SESG(AA23, THRUIN1, THRU1, SIG_DESC_SET(SCU4BC, 26));
+SIG_EXPR_LIST_DECL_SESG(AA23, THRUIN1, THRU1);
PIN_DECL_2(AA23, GPIOP2, PWM10, THRUIN1);
GROUP_DECL(PWM10G1, AA23);
FUNC_DECL_2(PWM10, PWM10G0, PWM10G1);
@@ -1086,16 +1086,16 @@ FUNC_GROUP_DECL(GPIU7, AC17);
FUNC_GROUP_DECL(ADC15, AC17);
#define AB15 168
-SSSF_PIN_DECL(AB15, GPIOV0, SIOS3, SIG_DESC_SET(SCU434, 8));
+SSSF_PIN_DECL(AB15, GPIOV0, SIOS3);
#define AF14 169
-SSSF_PIN_DECL(AF14, GPIOV1, SIOS5, SIG_DESC_SET(SCU434, 9));
+SSSF_PIN_DECL(AF14, GPIOV1, SIOS5);
#define AD14 170
SSSF_PIN_DECL(AD14, GPIOV2, SIOPWREQ, SIG_DESC_SET(SCU434, 10));
#define AC15 171
-SSSF_PIN_DECL(AC15, GPIOV3, SIOONCTRL, SIG_DESC_SET(SCU434, 11));
+SSSF_PIN_DECL(AC15, GPIOV3, SIOONCTRL);
#define AE15 172
SSSF_PIN_DECL(AE15, GPIOV4, SIOPWRGD, SIG_DESC_SET(SCU434, 12));
@@ -2692,6 +2692,22 @@ static int aspeed_g6_sig_expr_set(struct aspeed_pinmux_data *ctx,
return 0;
}
+#define GPIOP1 121
+#define GPIOP3 123
+static void aspeed_g6_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ /*
+ * If we're freeing GPIOP1 (121) or GPIOP3 (123) then re-enable the
+ * pass-through mux setting; otherwise, do nothing.
+ */
+ if (offset != GPIOP1 && offset != GPIOP3)
+ return;
+
+ aspeed_gpio_disable_free(pctldev, range, offset);
+}
+
static const struct aspeed_pin_config_map aspeed_g6_pin_config_map[] = {
{ PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
{ PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
@@ -2732,6 +2748,7 @@ static const struct pinmux_ops aspeed_g6_pinmux_ops = {
.get_function_groups = aspeed_pinmux_get_fn_groups,
.set_mux = aspeed_pinmux_set_mux,
.gpio_request_enable = aspeed_gpio_request_enable,
+ .gpio_disable_free = aspeed_g6_gpio_disable_free,
.strict = true,
};
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index c94e24aadf92..ade658af580b 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -439,6 +439,59 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
return 0;
}
+void aspeed_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct aspeed_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
+ const struct aspeed_pin_desc *pdesc = pdata->pins[offset].drv_data;
+ const struct aspeed_sig_expr ***prios, **funcs, *expr;
+ int ret;
+
+ if (!pdesc)
+ return;
+
+ dev_dbg(pctldev->dev,
+ "Freeing pass-through pin %s (%d). Re-enabling pass-through.\n",
+ pdesc->name, offset);
+
+ prios = pdesc->prios;
+
+ if (!prios)
+ return;
+
+ /* Disable any functions of higher priority than GPIO just in case */
+ while ((funcs = *prios)) {
+ if (aspeed_gpio_in_exprs(funcs))
+ break;
+
+ ret = aspeed_disable_sig(&pdata->pinmux, funcs);
+ if (ret)
+ return;
+
+ prios++;
+ }
+
+ if (!funcs) {
+ char *signals = get_defined_signals(pdesc);
+
+ pr_warn("No GPIO signal type found on pin %s (%d). Found: %s\n",
+ pdesc->name, offset, signals);
+ kfree(signals);
+
+ return;
+ }
+
+ /*
+ * Pass-through should be one priority higher than the GPIO function,
+ * so decrement our prios and enable that function
+ */
+ prios--;
+ funcs = *prios;
+ expr = *funcs;
+ aspeed_sig_expr_enable(&pdata->pinmux, expr);
+}
+
int aspeed_pinctrl_probe(struct platform_device *pdev,
struct pinctrl_desc *pdesc,
struct aspeed_pinctrl_data *pdata)
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
index 4dcde3bc29c8..bd497c20a15f 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
@@ -101,6 +101,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset);
+void aspeed_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset);
int aspeed_pinctrl_probe(struct platform_device *pdev,
struct pinctrl_desc *pdesc,
struct aspeed_pinctrl_data *pdata);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index aa29841bbb79..4ad30dcda4f9 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -195,6 +195,15 @@ config PWM_FSL_FTM
To compile this driver as a module, choose M here: the module
will be called pwm-fsl-ftm.
+config PWM_FTTMR010
+ tristate "Faraday Technology FTTMR010 timer PWM support"
+ help
+ Generic PWM framework driver for the Faraday Technology FTTMR010
+ timer PWM outputs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-fttmr010.
+
config PWM_HIBVT
tristate "HiSilicon BVT PWM support"
depends on ARCH_HISI || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 708840b7fba8..12605f055bc2 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
obj-$(CONFIG_PWM_DWC) += pwm-dwc.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
+obj-$(CONFIG_PWM_FTTMR010) += pwm-fttmr010.o
obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
diff --git a/drivers/pwm/pwm-fttmr010.c b/drivers/pwm/pwm-fttmr010.c
new file mode 100644
index 000000000000..2c1b2d59d05f
--- /dev/null
+++ b/drivers/pwm/pwm-fttmr010.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Intel Corporation
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define TIMER_CR 0x30
+
+#define TIMER5_ASPEED_COUNT 0x50
+#define TIMER5_ASPEED_LOAD 0x54
+#define TIMER5_ASPEED_MATCH1 0x58
+#define TIMER5_ASPEED_MATCH2 0x5c
+#define TIMER6_ASPEED_COUNT 0x60
+#define TIMER6_ASPEED_LOAD 0x64
+#define TIMER6_ASPEED_MATCH1 0x68
+#define TIMER6_ASPEED_MATCH2 0x6c
+#define TIMER7_ASPEED_COUNT 0x70
+#define TIMER7_ASPEED_LOAD 0x74
+#define TIMER7_ASPEED_MATCH1 0x78
+#define TIMER7_ASPEED_MATCH2 0x7c
+#define TIMER8_ASPEED_COUNT 0x80
+#define TIMER8_ASPEED_LOAD 0x84
+#define TIMER8_ASPEED_MATCH1 0x88
+#define TIMER8_ASPEED_MATCH2 0x8c
+
+#define TIMER_5_CR_ASPEED_ENABLE BIT(16)
+#define TIMER_5_CR_ASPEED_CLOCK BIT(17)
+#define TIMER_5_CR_ASPEED_INT BIT(18)
+#define TIMER_5_CR_ASPEED_PULSE_OUT BIT(19)
+#define TIMER_6_CR_ASPEED_ENABLE BIT(20)
+#define TIMER_6_CR_ASPEED_CLOCK BIT(21)
+#define TIMER_6_CR_ASPEED_INT BIT(22)
+#define TIMER_6_CR_ASPEED_PULSE_OUT BIT(23)
+#define TIMER_7_CR_ASPEED_ENABLE BIT(24)
+#define TIMER_7_CR_ASPEED_CLOCK BIT(25)
+#define TIMER_7_CR_ASPEED_INT BIT(26)
+#define TIMER_7_CR_ASPEED_PULSE_OUT BIT(27)
+#define TIMER_8_CR_ASPEED_ENABLE BIT(28)
+#define TIMER_8_CR_ASPEED_CLOCK BIT(29)
+#define TIMER_8_CR_ASPEED_INT BIT(30)
+#define TIMER_8_CR_ASPEED_PULSE_OUT BIT(31)
+
+/**
+ * struct pwm_fttmr010_variant - SoC-dependent variant data
+ * @bits: timer counter resolution
+ * @chan_min: lowest timer channel which has pwm pulse output
+ * @chan_max: highest timer channel which has pwm pulse output
+ * @output_mask: pwm pulse output mask which is defined in device tree
+ */
+struct pwm_fttmr010_variant {
+ u8 bits;
+ u8 chan_min;
+ u8 chan_max;
+ u8 output_mask;
+};
+
+/**
+ * struct pwm_fttmr010_chan - private data of FTTMR010 PWM channel
+ * @period_ns: current period in nanoseconds programmed to the hardware
+ * @duty_ns: current duty time in nanoseconds programmed to the hardware
+ */
+struct pwm_fttmr010_chan {
+ u32 period_ns;
+ u32 duty_ns;
+};
+
+/**
+ * struct pwm_fttmr010 - private data of FTTMR010 PWM
+ * @chip: generic PWM chip
+ * @variant: local copy of hardware variant data
+ * @disabled_mask: disabled status for all channels - one bit per channel
+ * @base: base address of mapped PWM registers
+ * @clk: clock used to drive the timers
+ * @clk_tick_ns: duration of one tick of @clk, in nanoseconds
+ */
+struct pwm_fttmr010 {
+ struct pwm_chip chip;
+ struct pwm_fttmr010_variant variant;
+ u8 disabled_mask;
+ void __iomem *base;
+ struct clk *clk;
+ u32 clk_tick_ns;
+};
+
+static inline
+struct pwm_fttmr010 *to_pwm_fttmr010(struct pwm_chip *chip)
+{
+ return container_of(chip, struct pwm_fttmr010, chip);
+}
+
+static int pwm_fttmr010_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_fttmr010 *priv = to_pwm_fttmr010(chip);
+ struct pwm_fttmr010_chan *chan;
+
+ if (!(priv->variant.output_mask & BIT(pwm->hwpwm))) {
+ dev_warn(chip->dev,
+ "tried to request PWM channel %d without output\n",
+ pwm->hwpwm);
+ return -EINVAL;
+ }
+
+ chan = devm_kzalloc(chip->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ pwm_set_chip_data(pwm, chan);
+
+ return 0;
+}
+
+static void pwm_fttmr010_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ devm_kfree(chip->dev, pwm_get_chip_data(pwm));
+ pwm_set_chip_data(pwm, NULL);
+}
+
+static int pwm_fttmr010_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_fttmr010 *priv = to_pwm_fttmr010(chip);
+ u32 cr;
+
+ cr = readl(priv->base + TIMER_CR);
+
+ switch (pwm->hwpwm) {
+ case 5:
+ cr |= (TIMER_5_CR_ASPEED_ENABLE | TIMER_5_CR_ASPEED_PULSE_OUT);
+ break;
+ case 6:
+ cr |= (TIMER_6_CR_ASPEED_ENABLE | TIMER_6_CR_ASPEED_PULSE_OUT);
+ break;
+ case 7:
+ cr |= (TIMER_7_CR_ASPEED_ENABLE | TIMER_7_CR_ASPEED_PULSE_OUT);
+ break;
+ case 8:
+ cr |= (TIMER_8_CR_ASPEED_ENABLE | TIMER_8_CR_ASPEED_PULSE_OUT);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ writel(cr, priv->base + TIMER_CR);
+ priv->disabled_mask &= ~BIT(pwm->hwpwm);
+
+ return 0;
+}
+
+static void pwm_fttmr010_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pwm_fttmr010 *priv = to_pwm_fttmr010(chip);
+ u32 cr;
+
+ cr = readl(priv->base + TIMER_CR);
+
+ switch (pwm->hwpwm) {
+ case 5:
+ cr &= ~(TIMER_5_CR_ASPEED_ENABLE | TIMER_5_CR_ASPEED_PULSE_OUT);
+ break;
+ case 6:
+ cr &= ~(TIMER_6_CR_ASPEED_ENABLE | TIMER_6_CR_ASPEED_PULSE_OUT);
+ break;
+ case 7:
+ cr &= ~(TIMER_7_CR_ASPEED_ENABLE | TIMER_7_CR_ASPEED_PULSE_OUT);
+ break;
+ case 8:
+ cr &= ~(TIMER_8_CR_ASPEED_ENABLE | TIMER_8_CR_ASPEED_PULSE_OUT);
+ break;
+ default:
+ return;
+ }
+
+ writel(cr, priv->base + TIMER_CR);
+ priv->disabled_mask |= BIT(pwm->hwpwm);
+}
+
+static int pwm_fttmr010_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ u32 tload, tmatch, creg_offset, lreg_offset, mreg_offset;
+ struct pwm_fttmr010_chan *chan = pwm_get_chip_data(pwm);
+ struct pwm_fttmr010 *priv = to_pwm_fttmr010(chip);
+
+ /*
+ * We currently avoid using 64bit arithmetic by using the
+ * fact that anything faster than 1Hz is easily representable
+ * by 32bits.
+ */
+ if (period_ns > NSEC_PER_SEC)
+ return -ERANGE;
+
+ /* No need to update */
+ if (chan->period_ns == period_ns && chan->duty_ns == duty_ns)
+ return 0;
+
+ tload = period_ns / priv->clk_tick_ns;
+
+ /* Period is too short */
+ if (tload <= 1)
+ return -ERANGE;
+
+ tmatch = duty_ns / priv->clk_tick_ns;
+
+ /* 0% duty is not available */
+ if (!tmatch)
+ ++tmatch;
+
+ tmatch = tload - tmatch;
+
+ /* Decrement to get tick numbers, instead of tick counts */
+ --tload;
+ --tmatch;
+
+ if (tload == 0 || tmatch == 0)
+ return -ERANGE;
+
+ dev_dbg(priv->chip.dev, "clk_tick_ns:%u, tload:%u, tmatch:%u\n",
+ priv->clk_tick_ns, tload, tmatch);
+
+ switch (pwm->hwpwm) {
+ case 5:
+ creg_offset = TIMER5_ASPEED_COUNT;
+ lreg_offset = TIMER5_ASPEED_LOAD;
+ mreg_offset = TIMER5_ASPEED_MATCH1;
+ break;
+ case 6:
+ creg_offset = TIMER6_ASPEED_COUNT;
+ lreg_offset = TIMER6_ASPEED_LOAD;
+ mreg_offset = TIMER6_ASPEED_MATCH1;
+ break;
+ case 7:
+ creg_offset = TIMER7_ASPEED_COUNT;
+ lreg_offset = TIMER7_ASPEED_LOAD;
+ mreg_offset = TIMER7_ASPEED_MATCH1;
+ break;
+ case 8:
+ creg_offset = TIMER8_ASPEED_COUNT;
+ lreg_offset = TIMER8_ASPEED_LOAD;
+ mreg_offset = TIMER8_ASPEED_MATCH1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ writel(tload, priv->base + creg_offset);
+ writel(tload, priv->base + lreg_offset);
+ writel(tmatch, priv->base + mreg_offset);
+
+ chan->period_ns = period_ns;
+ chan->duty_ns = duty_ns;
+
+ return 0;
+}
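+
+/*
+ * Worked example with illustrative numbers: with a 25 MHz PCLK,
+ * clk_tick_ns = 40. Requesting period_ns = 40000 and duty_ns = 10000 gives
+ * tload = 1000 and tmatch = 250, which becomes tmatch = 1000 - 250 = 750;
+ * after the final decrements the hardware is programmed with COUNT/LOAD = 999
+ * and MATCH1 = 749, i.e. a 25 kHz pulse output with a 25% duty cycle.
+ */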
+
+static const struct pwm_ops pwm_fttmr010_ops = {
+ .request = pwm_fttmr010_request,
+ .free = pwm_fttmr010_free,
+ .enable = pwm_fttmr010_enable,
+ .disable = pwm_fttmr010_disable,
+ .config = pwm_fttmr010_config,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_OF
+static const struct pwm_fttmr010_variant aspeed_variant = {
+ .bits = 32,
+ .chan_min = 5,
+ .chan_max = 8,
+};
+
+static const struct of_device_id pwm_fttmr010_matches[] = {
+ { .compatible = "aspeed,ast2400-timer", .data = &aspeed_variant },
+ { .compatible = "aspeed,ast2500-timer", .data = &aspeed_variant },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pwm_fttmr010_matches);
+
+static int pwm_fttmr010_parse_dt(struct pwm_fttmr010 *priv)
+{
+ struct device_node *np = priv->chip.dev->of_node;
+ const struct of_device_id *match;
+ struct property *prop;
+ const __be32 *cur;
+ u32 val;
+
+ match = of_match_node(pwm_fttmr010_matches, np);
+ if (!match)
+ return -ENODEV;
+
+ memcpy(&priv->variant, match->data, sizeof(priv->variant));
+
+ of_property_for_each_u32(np, "fttmr010,pwm-outputs", prop, cur, val) {
+ if (val < priv->variant.chan_min ||
+ val > priv->variant.chan_max) {
+ dev_err(priv->chip.dev,
+ "invalid channel index in fttmr010,pwm-outputs property\n");
+ continue;
+ }
+ priv->variant.output_mask |= BIT(val);
+ }
+
+ return 0;
+}
+#else
+static int pwm_fttmr010_parse_dt(struct pwm_fttmr010 *priv)
+{
+ return -ENODEV;
+}
+#endif
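+
+/*
+ * Illustrative device tree fragment (the node name and any other properties
+ * are hypothetical; only the compatible string and the fttmr010,pwm-outputs
+ * property are taken from this driver). It enables pulse outputs on timer
+ * channels 5 and 7:
+ *
+ *	timer {
+ *		compatible = "aspeed,ast2500-timer";
+ *		fttmr010,pwm-outputs = <5 7>;
+ *	};
+ */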
+
+static int pwm_fttmr010_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pwm_fttmr010 *priv;
+ struct resource *res;
+ ulong clk_rate;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->chip.dev = &pdev->dev;
+ priv->chip.ops = &pwm_fttmr010_ops;
+ priv->chip.base = -1;
+
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+ ret = pwm_fttmr010_parse_dt(priv);
+ if (ret)
+ return ret;
+
+ priv->chip.of_xlate = of_pwm_xlate_with_flags;
+ priv->chip.of_pwm_n_cells = 3;
+ } else {
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ return -EINVAL;
+ }
+
+ memcpy(&priv->variant, pdev->dev.platform_data,
+ sizeof(priv->variant));
+ }
+
+ priv->chip.npwm = priv->variant.chan_max + 1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(&pdev->dev, "PCLK");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get timer base clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable base clock\n");
+ return ret;
+ }
+
+ clk_rate = clk_get_rate(priv->clk);
+ priv->clk_tick_ns = NSEC_PER_SEC / clk_rate;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = pwmchip_add(&priv->chip);
+ if (ret < 0) {
+ dev_err(dev, "failed to register PWM chip\n");
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ dev_dbg(dev, "clk at %lu\n", clk_rate);
+
+ return 0;
+}
+
+static int pwm_fttmr010_remove(struct platform_device *pdev)
+{
+ struct pwm_fttmr010 *priv = platform_get_drvdata(pdev);
+
+ pwmchip_remove(&priv->chip);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pwm_fttmr010_resume(struct device *dev)
+{
+ struct pwm_fttmr010 *priv = dev_get_drvdata(dev);
+ struct pwm_chip *chip = &priv->chip;
+ unsigned int i;
+
+ for (i = priv->variant.chan_min; i <= priv->variant.chan_max; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+ struct pwm_fttmr010_chan *chan = pwm_get_chip_data(pwm);
+
+ if (!chan)
+ continue;
+
+ if (chan->period_ns) {
+ pwm_fttmr010_config(chip, pwm, chan->duty_ns,
+ chan->period_ns);
+ }
+
+ if (priv->disabled_mask & BIT(i))
+ pwm_fttmr010_disable(chip, pwm);
+ else
+ pwm_fttmr010_enable(chip, pwm);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pwm_fttmr010_pm_ops, NULL, pwm_fttmr010_resume);
+
+static struct platform_driver pwm_fttmr010_driver = {
+ .driver = {
+ .name = "fttmr010-timer-pwm",
+ .pm = &pwm_fttmr010_pm_ops,
+ .of_match_table = of_match_ptr(pwm_fttmr010_matches),
+ },
+ .probe = pwm_fttmr010_probe,
+ .remove = pwm_fttmr010_remove,
+};
+module_platform_driver(pwm_fttmr010_driver);
+
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("FTTMR010 PWM Driver for timer pulse outputs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e1bc5214494e..f51a2f0041de 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -564,6 +564,16 @@ config RTC_DRV_PALMAS
This driver can also be built as a module. If so, the module
will be called rtc-palma.
+config RTC_DRV_PCHC620
+ tristate "PCH C620 RTC driver"
+ help
+ If you say yes here you get support for the Intel C620 Series PCH
+ built-in read-only RTC. This driver is not for in-system use on x86,
+ but rather is for external access over I2C from a BMC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pchc620.
+
config RTC_DRV_TPS6586X
tristate "TI TPS6586X RTC driver"
depends on MFD_TPS6586X
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 5ceeafe4d5b2..1691049cdd63 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -114,6 +114,7 @@ obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
+obj-$(CONFIG_RTC_DRV_PCHC620) += rtc-pchc620.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
diff --git a/drivers/rtc/rtc-pchc620.c b/drivers/rtc/rtc-pchc620.c
new file mode 100644
index 000000000000..a944b327ca67
--- /dev/null
+++ b/drivers/rtc/rtc-pchc620.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RTC driver for PCHC620
+ * Copyright (C) 2021 YADRO
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define PCH_REG_FORCE_OFF 0x00
+#define PCH_REG_SC 0x09
+#define PCH_REG_MN 0x0a
+#define PCH_REG_HR 0x0b
+#define PCH_REG_DW 0x0c
+#define PCH_REG_DM 0x0d
+#define PCH_REG_MO 0x0e
+#define PCH_REG_YR 0x0f
+
+#define NUM_TIME_REGS (PCH_REG_YR - PCH_REG_SC + 1)
+
+struct pch {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+};
+
+static int pchc620_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pch *pch = i2c_get_clientdata(client);
+ unsigned char rtc_data[NUM_TIME_REGS] = {0};
+ int rc;
+
+ rc = regmap_bulk_read(pch->regmap, PCH_REG_SC, rtc_data, NUM_TIME_REGS);
+ if (rc < 0) {
+ dev_err(dev, "Fail to read time reg(%d)\n", rc);
+ return rc;
+ }
+
+ tm->tm_sec = bcd2bin(rtc_data[0]);
+ tm->tm_min = bcd2bin(rtc_data[1]);
+ tm->tm_hour = bcd2bin(rtc_data[2]);
+ tm->tm_wday = rtc_data[3];
+ tm->tm_mday = bcd2bin(rtc_data[4]);
+ tm->tm_mon = bcd2bin(rtc_data[5]) - 1;
+ tm->tm_year = bcd2bin(rtc_data[6]) + 100;
+
+ return 0;
+}
+
+static ssize_t pch_force_off(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pch *pch = i2c_get_clientdata(client);
+ unsigned long val;
+ int rc;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val) {
+ /* 0x02 host force off */
+ rc = regmap_write(pch->regmap, PCH_REG_FORCE_OFF, 0x2);
+ if (rc < 0) {
+ dev_err(dev, "Fail to read time reg(%d)\n", rc);
+ return rc;
+ }
+ }
+
+ return count;
+}
+static DEVICE_ATTR(force_off, S_IWUSR | S_IWGRP, NULL, pch_force_off);
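+
+/*
+ * Writing a non-zero value to the "force_off" attribute writes 0x02 to
+ * PCH register 0x00, requesting a host force-off. A typical (illustrative)
+ * invocation from userspace is:
+ *
+ *	echo 1 > /sys/bus/i2c/devices/<bus>-<addr>/force_off
+ */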
+
+static const struct rtc_class_ops pchc620_rtc_ops = {
+ .read_time = pchc620_rtc_read_time,
+};
+
+static const struct regmap_config pchc620_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_read = true,
+};
+
+static int pchc620_rtc_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pch *pch;
+ int rc;
+
+ pch = devm_kzalloc(&client->dev, sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ pch->regmap = devm_regmap_init_i2c(client, &pchc620_rtc_regmap_config);
+ if (IS_ERR(pch->regmap)) {
+ dev_err(&client->dev, "regmap_init failed\n");
+ return PTR_ERR(pch->regmap);
+ }
+
+ i2c_set_clientdata(client, pch);
+
+ pch->rtc = devm_rtc_device_register(&client->dev, "pch-rtc",
+ &pchc620_rtc_ops, THIS_MODULE);
+ if (IS_ERR(pch->rtc))
+ return PTR_ERR(pch->rtc);
+
+ rc = sysfs_create_file(&client->dev.kobj, &dev_attr_force_off.attr);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int pchc620_rtc_remove(struct i2c_client *client)
+{
+ sysfs_remove_file(&client->dev.kobj, &dev_attr_force_off.attr);
+ return 0;
+}
+
+static const struct i2c_device_id pchc620_rtc_id[] = {
+ { "pchc620-rtc", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pchc620_rtc_id);
+
+static const struct of_device_id pchc620_rtc_of_match[] = {
+ { .compatible = "rtc,pchc620", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pchc620_rtc_of_match);
+
+static struct i2c_driver pchc620_rtc_driver = {
+ .driver = {
+ .name = "pchc620-rtc",
+ .of_match_table = pchc620_rtc_of_match,
+ },
+ .probe = pchc620_rtc_probe,
+ .remove = pchc620_rtc_remove,
+ .id_table = pchc620_rtc_id,
+};
+module_i2c_driver(pchc620_rtc_driver);
+
+MODULE_DESCRIPTION("RTC PCHC620 driver");
+MODULE_AUTHOR("Ivan Mikhaylov <i.mikhaylov@yadro.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index aaf4596ae4f9..09bf81b295fd 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -4,6 +4,22 @@ if ARCH_ASPEED || COMPILE_TEST
menu "ASPEED SoC drivers"
+config ASPEED_BMC_MISC
+ bool "Miscellaneous ASPEED BMC interfaces"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ default ARCH_ASPEED
+ help
+ Say yes to expose VGA and LPC scratch registers, and other
+ miscellaneous control interfaces specific to the ASPEED BMC SoCs
+
+config ASPEED_ESPI_SLAVE
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on REGMAP_MMIO
+ tristate "Aspeed ast2500/2600 eSPI slave device driver"
+ help
+ Enable driver support for the Aspeed eSPI engine. It handles events
+ arising from firmware boot-up and enables the eSPI OOB channel.
+
config ASPEED_LPC_CTRL
tristate "ASPEED LPC firmware cycle control"
select REGMAP
@@ -14,6 +30,20 @@ config ASPEED_LPC_CTRL
also provides a read/write interface to a BMC ram region where the
host LPC read/write region can be buffered.
+config ASPEED_LPC_MBOX
+ tristate "Aspeed LPC Mailbox Controller"
+ depends on REGMAP && MFD_SYSCON
+ help
+ Expose the ASPEED LPC MBOX registers found on Aspeed SOCs (AST2400
+ and AST2500) to userspace.
+
+config ASPEED_LPC_SIO
+ tristate "Aspeed ast2400/2500 HOST LPC SIO support"
+ depends on REGMAP && MFD_SYSCON
+ help
+ Provides a driver to control the LPC SIO interface on ASPEED platforms
+ through ioctl()s.
+
config ASPEED_LPC_SNOOP
tristate "ASPEED LPC snoop support"
select REGMAP
@@ -24,6 +54,14 @@ config ASPEED_LPC_SNOOP
allows the BMC to listen on and save the data written by
the host to an arbitrary LPC I/O port.
+config ASPEED_MCTP
+ tristate "Aspeed ast2600 MCTP Controller support"
+ depends on REGMAP && MFD_SYSCON
+ help
+ Enable support for ast2600 MCTP Controller.
+ The MCTP controller allows the BMC to communicate with devices on
+ the host PCIe network.
+
config ASPEED_UART_ROUTING
tristate "ASPEED uart routing control"
select REGMAP
@@ -62,6 +100,14 @@ config ASPEED_XDMA
SoCs. The XDMA engine can perform PCIe DMA operations between the BMC
and a host processor.
+config ASPEED_VGA_SHAREDMEM
+ tristate "Aspeed VGA Shared memory"
+ help
+ To access VGA shared memory on an Aspeed BMC, enable this option.
+ This driver is used by the ManagedDataRegionlV2 specification. Per the
+ specification, the BIOS transfers the whole SMBIOS table to VGA memory,
+ and the BMC can read the table from VGA memory through this driver.
+
config ASPEED_SBC
bool "ASPEED Secure Boot Controller driver"
default MACH_ASPEED_G6
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index 9e275fd1d54d..6b498ad1537a 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -1,8 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ASPEED_BMC_MISC) += aspeed-bmc-misc.o
+obj-$(CONFIG_ASPEED_ESPI_SLAVE) += aspeed-espi.o
+aspeed-espi-y := aspeed-espi-slave.o aspeed-espi-oob.o aspeed-espi-vw.o
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_MBOX) += aspeed-lpc-mbox.o
+obj-$(CONFIG_ASPEED_LPC_SIO) += aspeed-lpc-sio.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o
obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
obj-$(CONFIG_ASPEED_SBC) += aspeed-sbc.o
obj-$(CONFIG_ASPEED_XDMA) += aspeed-xdma.o
+obj-$(CONFIG_ASPEED_VGA_SHAREDMEM) += aspeed-vga-sharedmem.o
+obj-$(CONFIG_ASPEED_MCTP) += aspeed-mctp.o
diff --git a/drivers/soc/aspeed/aspeed-bmc-misc.c b/drivers/soc/aspeed/aspeed-bmc-misc.c
new file mode 100644
index 000000000000..4aad3129f793
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-bmc-misc.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corp.
+
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <crypto/hash.h>
+
+#define DEVICE_NAME "aspeed-bmc-misc"
+
+struct aspeed_bmc_ctrl {
+ const char *name;
+ u32 offset;
+ u64 mask;
+ u32 shift;
+ bool read_only;
+ u32 reg_width;
+ const char *hash_data;
+ struct regmap *map;
+ struct kobj_attribute attr;
+};
+
+struct aspeed_bmc_misc {
+ struct device *dev;
+ struct regmap *map;
+ struct aspeed_bmc_ctrl *ctrls;
+ int nr_ctrls;
+};
+
+static int aspeed_bmc_misc_parse_dt_child(struct device_node *child,
+ struct aspeed_bmc_ctrl *ctrl)
+{
+ int rc;
+ u32 mask;
+
+ /* Example child:
+ *
+ * ilpc2ahb {
+ * offset = <0x80>;
+ * bit-mask = <0x1>;
+ * bit-shift = <6>;
+ * reg-width = <64>;
+ * label = "foo";
+ * }
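+ *
+ * Two optional properties are also handled below: "read-only" causes writes
+ * to the resulting sysfs attribute to be rejected with -EROFS, and
+ * "hash-data" makes reads return an HMAC-SHA256 digest (keyed with the
+ * register value) of that string instead of the raw value.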
+ */
+ if (of_property_read_string(child, "label", &ctrl->name))
+ ctrl->name = child->name;
+
+ rc = of_property_read_u32(child, "offset", &ctrl->offset);
+ if (rc < 0)
+ return rc;
+
+ /* optional reg-width, default to 32 */
+ rc = of_property_read_u32(child, "reg-width", &ctrl->reg_width);
+ if (rc < 0 || ctrl->reg_width != 64)
+ ctrl->reg_width = 32;
+
+ if (ctrl->reg_width == 32) {
+ rc = of_property_read_u32(child, "bit-mask", &mask);
+ if (rc < 0)
+ return rc;
+ ctrl->mask = mask;
+ } else {
+ rc = of_property_read_u64(child, "bit-mask", &ctrl->mask);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = of_property_read_u32(child, "bit-shift", &ctrl->shift);
+ if (rc < 0)
+ return rc;
+
+ ctrl->read_only = of_property_read_bool(child, "read-only");
+
+ ctrl->mask <<= ctrl->shift;
+ /* optional hash_data for obfuscating reads */
+ if (of_property_read_string(child, "hash-data", &ctrl->hash_data))
+ ctrl->hash_data = NULL;
+
+ return 0;
+}
+
+static int aspeed_bmc_misc_parse_dt(struct aspeed_bmc_misc *bmc,
+ struct device_node *parent)
+{
+ struct aspeed_bmc_ctrl *ctrl;
+ struct device_node *child;
+ int rc;
+
+ bmc->nr_ctrls = of_get_child_count(parent);
+ bmc->ctrls = devm_kcalloc(bmc->dev, bmc->nr_ctrls, sizeof(*bmc->ctrls),
+ GFP_KERNEL);
+ if (!bmc->ctrls)
+ return -ENOMEM;
+
+ ctrl = bmc->ctrls;
+ for_each_child_of_node(parent, child) {
+ rc = aspeed_bmc_misc_parse_dt_child(child, ctrl++);
+ if (rc < 0) {
+ of_node_put(child);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+#define SHA256_DIGEST_LEN 32
+static int hmac_sha256(u8 *key, u8 ksize, const char *plaintext, u8 psize,
+ u8 *output)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *shash;
+ int ret;
+
+ if (!ksize)
+ return -EINVAL;
+
+ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+ if (IS_ERR(tfm)) {
+ return -ENOMEM;
+ }
+
+ ret = crypto_shash_setkey(tfm, key, ksize);
+ if (ret)
+ goto failed;
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ shash->tfm = tfm;
+ ret = crypto_shash_digest(shash, plaintext, psize, output);
+
+ kfree(shash);
+
+failed:
+ crypto_free_shash(tfm);
+ return ret;
+}
+
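+/*
+ * When a control carries a "hash-data" string, the 64-bit register value is
+ * not exposed directly: it is used as the HMAC-SHA256 key over the hash-data
+ * string, and the hex-encoded digest is what the sysfs read returns.
+ */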
+static ssize_t aspeed_bmc_misc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct aspeed_bmc_ctrl *ctrl;
+ u32 val;
+ u64 val64;
+ int rc;
+ u8 *binbuf;
+ size_t buf_len;
+ u8 hashbuf[SHA256_DIGEST_LEN];
+
+ ctrl = container_of(attr, struct aspeed_bmc_ctrl, attr);
+
+ if (ctrl->reg_width == 32) {
+ rc = regmap_read(ctrl->map, ctrl->offset, &val);
+ if (rc)
+ return rc;
+ val &= (u32)ctrl->mask;
+ val >>= ctrl->shift;
+
+ return sprintf(buf, "%u\n", val);
+ }
+ rc = regmap_read(ctrl->map, ctrl->offset, &val);
+ if (rc)
+ return rc;
+ val64 = val;
+ rc = regmap_read(ctrl->map, ctrl->offset + sizeof(u32), &val);
+ if (rc)
+ return rc;
+ /* aspeed puts 64-bit regs as L, H in address space */
+ val64 |= (u64)val << 32;
+ val64 &= ctrl->mask;
+ val64 >>= ctrl->shift;
+ buf_len = sizeof(val64);
+
+ if (ctrl->hash_data) {
+ rc = hmac_sha256((u8 *)&val64, buf_len, ctrl->hash_data,
+ strlen(ctrl->hash_data), hashbuf);
+ if (rc)
+ return rc;
+ buf_len = SHA256_DIGEST_LEN;
+ binbuf = hashbuf;
+ } else {
+ binbuf = (u8 *)&val64;
+ buf_len = sizeof(val64);
+ }
+ bin2hex(buf, binbuf, buf_len);
+ buf[buf_len * 2] = '\n';
+ rc = buf_len * 2 + 1;
+
+ return rc;
+}
+
+static ssize_t aspeed_bmc_misc_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_bmc_ctrl *ctrl;
+ long val;
+ int rc;
+
+ ctrl = container_of(attr, struct aspeed_bmc_ctrl, attr);
+
+ if (ctrl->read_only)
+ return -EROFS;
+
+ rc = kstrtol(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ val <<= ctrl->shift;
+ rc = regmap_write_bits(ctrl->map, ctrl->offset, ctrl->mask, val);
+
+ return rc < 0 ? rc : count;
+}
+
+static int aspeed_bmc_misc_add_sysfs_attr(struct aspeed_bmc_misc *bmc,
+ struct aspeed_bmc_ctrl *ctrl)
+{
+ ctrl->map = bmc->map;
+
+ sysfs_attr_init(&ctrl->attr.attr);
+ ctrl->attr.attr.name = ctrl->name;
+ ctrl->attr.attr.mode = 0664;
+ ctrl->attr.show = aspeed_bmc_misc_show;
+ ctrl->attr.store = aspeed_bmc_misc_store;
+
+ return sysfs_create_file(&bmc->dev->kobj, &ctrl->attr.attr);
+}
+
+static int aspeed_bmc_misc_populate_sysfs(struct aspeed_bmc_misc *bmc)
+{
+ int rc;
+ int i;
+
+ for (i = 0; i < bmc->nr_ctrls; i++) {
+ rc = aspeed_bmc_misc_add_sysfs_attr(bmc, &bmc->ctrls[i]);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int aspeed_bmc_misc_probe(struct platform_device *pdev)
+{
+ struct aspeed_bmc_misc *bmc;
+ int rc;
+
+ bmc = devm_kzalloc(&pdev->dev, sizeof(*bmc), GFP_KERNEL);
+ if (!bmc)
+ return -ENOMEM;
+
+ bmc->dev = &pdev->dev;
+ bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(bmc->map))
+ return PTR_ERR(bmc->map);
+
+ rc = aspeed_bmc_misc_parse_dt(bmc, pdev->dev.of_node);
+ if (rc < 0)
+ return rc;
+
+ return aspeed_bmc_misc_populate_sysfs(bmc);
+}
+
+static const struct of_device_id aspeed_bmc_misc_match[] = {
+ { .compatible = "aspeed,bmc-misc" },
+ { },
+};
+
+static struct platform_driver aspeed_bmc_misc = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_bmc_misc_match,
+ },
+ .probe = aspeed_bmc_misc_probe,
+};
+
+module_platform_driver(aspeed_bmc_misc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
diff --git a/drivers/soc/aspeed/aspeed-espi-ctrl.h b/drivers/soc/aspeed/aspeed-espi-ctrl.h
new file mode 100644
index 000000000000..14e0334b80c8
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-espi-ctrl.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 Aspeed Technology Inc.
+ */
+#ifndef _ASPEED_ESPI_CTRL_H_
+#define _ASPEED_ESPI_CTRL_H_
+
+#include <linux/bits.h>
+
+enum aspeed_espi_version {
+ ASPEED_ESPI_AST2500,
+ ASPEED_ESPI_AST2600,
+};
+
+struct aspeed_espi_model {
+ u32 version;
+};
+
+struct aspeed_espi_ctrl {
+ struct device *dev;
+
+ struct regmap *map;
+ struct clk *clk;
+
+ int irq;
+
+ struct aspeed_espi_perif *perif;
+ struct aspeed_espi_vw *vw;
+ struct aspeed_espi_oob *oob;
+ struct aspeed_espi_flash *flash;
+
+ const struct aspeed_espi_model *model;
+};
+
+/* eSPI register offset */
+#define ASPEED_ESPI_CTRL 0x000
+#define ASPEED_ESPI_CTRL_SW_RESET GENMASK(31, 24)
+#define ASPEED_ESPI_HW_RESET BIT(31)
+#define ASPEED_ESPI_CTRL_OOB_RX_SW_RST BIT(28)
+#define ASPEED_ESPI_CTRL_FLASH_TX_DMA_EN BIT(23)
+#define ASPEED_ESPI_CTRL_FLASH_RX_DMA_EN BIT(22)
+#define ASPEED_ESPI_CTRL_OOB_TX_DMA_EN BIT(21)
+#define ASPEED_ESPI_CTRL_OOB_RX_DMA_EN BIT(20)
+#define ASPEED_ESPI_CTRL_PERIF_NP_TX_DMA_EN BIT(19)
+#define ASPEED_ESPI_CTRL_PERIF_PC_TX_DMA_EN BIT(17)
+#define ASPEED_ESPI_CTRL_PERIF_PC_RX_DMA_EN BIT(16)
+#define ASPEED_ESPI_CTRL_FLASH_SW_MODE_MASK GENMASK(11, 10)
+#define ASPEED_ESPI_CTRL_FLASH_SW_MODE_SHIFT 10
+#define ASPEED_ESPI_CTRL_PERIF_PC_RX_DMA_EN BIT(16)
+#define ASPEED_ESPI_CTRL_FLASH_SW_RDY BIT(7)
+#define ASPEED_ESPI_CTRL_OOB_CHRDY BIT(4)
+#define ASPEED_ESPI_CTRL_OOB_SW_RDY BIT(4)
+#define ASPEED_ESPI_CTRL_VW_SW_RDY BIT(3)
+#define ASPEED_ESPI_CTRL_PERIF_SW_RDY BIT(1)
+#define ASPEED_ESPI_STS 0x004
+#define ASPEED_ESPI_INT_STS 0x008
+#define ASPEED_ESPI_INT_STS_HW_RST_DEASSERT BIT(31)
+#define ASPEED_ESPI_INT_STS_OOB_RX_TMOUT BIT(23)
+#define ASPEED_ESPI_VW_SYSEVT1 BIT(22)
+#define ASPEED_ESPI_INT_STS_VW_SYSEVT1 BIT(22)
+#define ASPEED_ESPI_INT_STS_FLASH_TX_ERR BIT(21)
+#define ASPEED_ESPI_INT_STS_OOB_TX_ERR BIT(20)
+#define ASPEED_ESPI_INT_STS_FLASH_TX_ABT BIT(19)
+#define ASPEED_ESPI_INT_STS_OOB_TX_ABT BIT(18)
+#define ASPEED_ESPI_INT_STS_PERIF_NP_TX_ABT BIT(17)
+#define ASPEED_ESPI_INT_STS_PERIF_PC_TX_ABT BIT(16)
+#define ASPEED_ESPI_INT_STS_FLASH_RX_ABT BIT(15)
+#define ASPEED_ESPI_INT_STS_OOB_RX_ABT BIT(14)
+#define ASPEED_ESPI_INT_STS_PERIF_NP_RX_ABT BIT(13)
+#define ASPEED_ESPI_INT_STS_PERIF_PC_RX_ABT BIT(12)
+#define ASPEED_ESPI_INT_STS_PERIF_NP_TX_ERR BIT(11)
+#define ASPEED_ESPI_INT_STS_PERIF_PC_TX_ERR BIT(10)
+#define ASPEED_ESPI_INT_STS_VW_GPIOEVT BIT(9)
+#define ASPEED_ESPI_VW_SYSEVT BIT(8)
+#define ASPEED_ESPI_INT_STS_VW_SYSEVT BIT(8)
+#define ASPEED_ESPI_INT_STS_FLASH_TX_CMPLT BIT(7)
+#define ASPEED_ESPI_INT_STS_FLASH_RX_CMPLT BIT(6)
+#define ASPEED_ESPI_INT_STS_OOB_TX_CMPLT BIT(5)
+#define ASPEED_ESPI_INT_STS_OOB_RX_CMPLT BIT(4)
+#define ASPEED_ESPI_INT_STS_PERIF_NP_TX_CMPLT BIT(3)
+#define ASPEED_ESPI_INT_STS_PERIF_PC_TX_CMPLT BIT(1)
+#define ASPEED_ESPI_INT_STS_PERIF_PC_RX_CMPLT BIT(0)
+#define ASPEED_ESPI_INT_EN 0x00c
+#define ASPEED_ESPI_INT_EN_HW_RST_DEASSERT BIT(31)
+#define ASPEED_ESPI_INT_EN_OOB_RX_TMOUT BIT(23)
+#define ASPEED_ESPI_INT_EN_VW_SYSEVT1 BIT(22)
+#define ASPEED_ESPI_INT_EN_FLASH_TX_ERR BIT(21)
+#define ASPEED_ESPI_INT_EN_OOB_TX_ERR BIT(20)
+#define ASPEED_ESPI_INT_EN_FLASH_TX_ABT BIT(19)
+#define ASPEED_ESPI_INT_EN_OOB_TX_ABT BIT(18)
+#define ASPEED_ESPI_INT_EN_PERIF_NP_TX_ABT BIT(17)
+#define ASPEED_ESPI_INT_EN_PERIF_PC_TX_ABT BIT(16)
+#define ASPEED_ESPI_INT_EN_FLASH_RX_ABT BIT(15)
+#define ASPEED_ESPI_INT_EN_OOB_RX_ABT BIT(14)
+#define ASPEED_ESPI_INT_EN_PERIF_NP_RX_ABT BIT(13)
+#define ASPEED_ESPI_INT_EN_PERIF_PC_RX_ABT BIT(12)
+#define ASPEED_ESPI_INT_EN_PERIF_NP_TX_ERR BIT(11)
+#define ASPEED_ESPI_INT_EN_PERIF_PC_TX_ERR BIT(10)
+#define ASPEED_ESPI_INT_EN_VW_GPIOEVT BIT(9)
+#define ASPEED_ESPI_INT_EN_VW_SYSEVT BIT(8)
+#define ASPEED_ESPI_INT_EN_FLASH_TX_CMPLT BIT(7)
+#define ASPEED_ESPI_INT_EN_FLASH_RX_CMPLT BIT(6)
+#define ASPEED_ESPI_INT_EN_OOB_TX_CMPLT BIT(5)
+#define ASPEED_ESPI_INT_EN_OOB_RX_CMPLT BIT(4)
+#define ASPEED_ESPI_INT_EN_PERIF_NP_TX_CMPLT BIT(3)
+#define ASPEED_ESPI_INT_EN_PERIF_PC_TX_CMPLT BIT(1)
+#define ASPEED_ESPI_INT_EN_PERIF_PC_RX_CMPLT BIT(0)
+#define ASPEED_ESPI_PERIF_PC_RX_DMA 0x010
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL 0x014
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL_PEND_SERV BIT(31)
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL_LEN_MASK GENMASK(23, 12)
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL_LEN_SHIFT 12
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL_TAG_MASK GENMASK(11, 8)
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL_TAG_SHIFT 8
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL_CYC_MASK GENMASK(7, 0)
+#define ASPEED_ESPI_PERIF_PC_RX_CTRL_CYC_SHIFT 0
+#define ASPEED_ESPI_PERIF_PC_RX_PORT 0x018
+#define ASPEED_ESPI_PERIF_PC_TX_DMA 0x020
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL 0x024
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL_TRIGGER BIT(31)
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL_LEN_MASK GENMASK(23, 12)
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL_LEN_SHIFT 12
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL_TAG_MASK GENMASK(11, 8)
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL_TAG_SHIFT 8
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL_CYC_MASK GENMASK(7, 0)
+#define ASPEED_ESPI_PERIF_PC_TX_CTRL_CYC_SHIFT 0
+#define ASPEED_ESPI_PERIF_PC_TX_PORT 0x028
+#define ASPEED_ESPI_PERIF_NP_TX_DMA 0x030
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL 0x034
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL_TRIGGER BIT(31)
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL_LEN_MASK GENMASK(23, 12)
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL_LEN_SHIFT 12
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL_TAG_MASK GENMASK(11, 8)
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL_TAG_SHIFT 8
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL_CYC_MASK GENMASK(7, 0)
+#define ASPEED_ESPI_PERIF_NP_TX_CTRL_CYC_SHIFT 0
+#define ASPEED_ESPI_PERIF_NP_TX_PORT 0x038
+#define ASPEED_ESPI_OOB_RX_DMA 0x040
+#define ASPEED_ESPI_OOB_RX_CTRL 0x044
+#define ASPEED_ESPI_OOB_RX_CTRL_PEND_SERV BIT(31)
+#define ASPEED_ESPI_OOB_RX_CTRL_LEN_MASK GENMASK(23, 12)
+#define ASPEED_ESPI_OOB_RX_CTRL_LEN_SHIFT 12
+#define ASPEED_ESPI_OOB_RX_CTRL_TAG_MASK GENMASK(11, 8)
+#define ASPEED_ESPI_OOB_RX_CTRL_TAG_SHIFT 8
+#define ASPEED_ESPI_OOB_RX_CTRL_CYC_MASK GENMASK(7, 0)
+#define ASPEED_ESPI_OOB_RX_CTRL_CYC_SHIFT 0
+#define ASPEED_ESPI_OOB_RX_PORT 0x048
+#define ASPEED_ESPI_OOB_TX_DMA 0x050
+#define ASPEED_ESPI_OOB_TX_CTRL 0x054
+#define ASPEED_ESPI_OOB_TX_CTRL_TRIGGER BIT(31)
+#define ASPEED_ESPI_OOB_TX_CTRL_LEN_MASK GENMASK(23, 12)
+#define ASPEED_ESPI_OOB_TX_CTRL_LEN_SHIFT 12
+#define ASPEED_ESPI_OOB_TX_CTRL_TAG_MASK GENMASK(11, 8)
+#define ASPEED_ESPI_OOB_TX_CTRL_TAG_SHIFT 8
+#define ASPEED_ESPI_OOB_TX_CTRL_CYC_MASK GENMASK(7, 0)
+#define ASPEED_ESPI_OOB_TX_CTRL_CYC_SHIFT 0
+#define ASPEED_ESPI_OOB_TX_PORT 0x058
+#define ASPEED_ESPI_FLASH_RX_DMA 0x060
+#define ASPEED_ESPI_FLASH_RX_CTRL 0x064
+#define ASPEED_ESPI_FLASH_RX_CTRL_PEND_SERV BIT(31)
+#define ASPEED_ESPI_FLASH_RX_CTRL_LEN_MASK GENMASK(23, 12)
+#define ASPEED_ESPI_FLASH_RX_CTRL_LEN_SHIFT 12
+#define ASPEED_ESPI_FLASH_RX_CTRL_TAG_MASK GENMASK(11, 8)
+#define ASPEED_ESPI_FLASH_RX_CTRL_TAG_SHIFT 8
+#define ASPEED_ESPI_FLASH_RX_CTRL_CYC_MASK GENMASK(7, 0)
+#define ASPEED_ESPI_FLASH_RX_CTRL_CYC_SHIFT 0
+#define ASPEED_ESPI_FLASH_RX_PORT 0x068
+#define ASPEED_ESPI_FLASH_TX_DMA 0x070
+#define ASPEED_ESPI_FLASH_TX_CTRL 0x074
+#define ASPEED_ESPI_FLASH_TX_CTRL_TRIGGER BIT(31)
+#define ASPEED_ESPI_FLASH_TX_CTRL_LEN_MASK GENMASK(23, 12)
+#define ASPEED_ESPI_FLASH_TX_CTRL_LEN_SHIFT 12
+#define ASPEED_ESPI_FLASH_TX_CTRL_TAG_MASK GENMASK(11, 8)
+#define ASPEED_ESPI_FLASH_TX_CTRL_TAG_SHIFT 8
+#define ASPEED_ESPI_FLASH_TX_CTRL_CYC_MASK GENMASK(7, 0)
+#define ASPEED_ESPI_FLASH_TX_CTRL_CYC_SHIFT 0
+#define ASPEED_ESPI_FLASH_TX_PORT 0x078
+#define ASPEED_ESPI_CTRL2 0x080
+#define ASPEED_ESPI_CTRL2_MEMCYC_RD_DIS BIT(6)
+#define ASPEED_ESPI_CTRL2_MEMCYC_WR_DIS BIT(4)
+#define ASPEED_ESPI_PERIF_PC_RX_SADDR 0x084
+#define ASPEED_ESPI_PERIF_PC_RX_TADDR 0x088
+#define ASPEED_ESPI_PERIF_PC_RX_MASK 0x08c
+#define ASPEED_ESPI_PERIF_PC_RX_MASK_CFG_WP BIT(0)
+#define ASPEED_ESPI_SYSEVT_INT_EN 0x094
+#define ASPEED_ESPI_SYSEVT 0x098
+#define ASPEED_ESPI_SYSEVT_HOST_RST_ACK BIT(27)
+#define ASPEED_ESPI_SYSEVT_RST_CPU_INIT BIT(26)
+#define ASPEED_ESPI_SYSEVT_SLAVE_BOOT_STATUS BIT(23)
+#define ASPEED_ESPI_SYSEVT_NON_FATAL_ERR BIT(22)
+#define ASPEED_ESPI_SYSEVT_FATAL_ERR BIT(21)
+#define ASPEED_ESPI_SYSEVT_SLAVE_BOOT_DONE BIT(20)
+#define ASPEED_ESPI_SYSEVT_OOB_RST_ACK BIT(16)
+#define ASPEED_ESPI_SYSEVT_NMI_OUT BIT(10)
+#define ASPEED_ESPI_SYSEVT_SMI_OUT BIT(9)
+#define ASPEED_ESPI_SYSEVT_HOST_RST_WARN BIT(8)
+#define ASPEED_ESPI_SYSEVT_OOB_RST_WARN BIT(6)
+#define ASPEED_ESPI_SYSEVT_PLTRSTN BIT(5)
+#define ASPEED_ESPI_SYSEVT_SUSPEND BIT(4)
+#define ASPEED_ESPI_SYSEVT_S5_SLEEP BIT(2)
+#define ASPEED_ESPI_SYSEVT_S4_SLEEP BIT(1)
+#define ASPEED_ESPI_SYSEVT_S3_SLEEP BIT(0)
+#define ASPEED_ESPI_VW_GPIO_VAL 0x09c
+#define ASPEED_ESPI_GEN_CAP_N_CONF 0x0a0
+#define ASPEED_ESPI_CH0_CAP_N_CONF 0x0a4
+#define ASPEED_ESPI_CH1_CAP_N_CONF 0x0a8
+#define ASPEED_ESPI_CH2_CAP_N_CONF 0x0ac
+#define ASPEED_ESPI_CH3_CAP_N_CONF 0x0b0
+#define ASPEED_ESPI_CH3_CAP_N_CONF2 0x0b4
+#define ASPEED_ESPI_SYSEVT1_INT_EN 0x100
+#define ASPEED_ESPI_SYSEVT1 0x104
+#define ASPEED_ESPI_SYSEVT1_SUSPEND_ACK BIT(20)
+#define ASPEED_ESPI_SYSEVT1_SUSPEND_WARN BIT(0)
+#define ASPEED_ESPI_SYSEVT_INT_T0 0x110
+#define ASPEED_ESPI_SYSEVT_INT_T0_MASK 0x00
+#define ASPEED_ESPI_SYSEVT_INT_T1 0x114
+#define ASPEED_ESPI_SYSEVT_INT_T1_MASK 0x00
+#define ASPEED_ESPI_SYSEVT_INT_T2 0x118
+#define ASPEED_ESPI_SYSEVT_INT_T2_HOST_RST_WARN BIT(8)
+#define ASPEED_ESPI_SYSEVT_INT_T2_OOB_RST_WARN BIT(6)
+#define ASPEED_ESPI_SYSEVT_INT_STS 0x11c
+#define ASPEED_ESPI_SYSEVT_INT_STS_NMI_OUT BIT(10)
+#define ASPEED_ESPI_SYSEVT_INT_STS_SMI_OUT BIT(9)
+#define ASPEED_ESPI_SYSEVT_INT_STS_HOST_RST_WARN BIT(8)
+#define ASPEED_ESPI_SYSEVT_INT_STS_OOB_RST_WARN BIT(6)
+#define ASPEED_ESPI_SYSEVT_INT_STS_PLTRSTN BIT(5)
+#define ASPEED_ESPI_SYSEVT_INT_STS_SUSPEND BIT(4)
+#define ASPEED_ESPI_SYSEVT1_INT_T0 0x120
+#define ASPEED_ESPI_SYSEVT1_INT_T0_MASK 0x00
+#define ASPEED_ESPI_SYSEVT1_INT_T1 0x124
+#define ASPEED_ESPI_SYSEVT1_INT_T1_MASK 0x00
+#define ASPEED_ESPI_SYSEVT1_INT_T2 0x128
+#define ASPEED_ESPI_SYSEVT1_INT_T2_MASK BIT(0)
+#define ASPEED_ESPI_SYSEVT1_INT_STS 0x12c
+#define ASPEED_ESPI_SYSEVT1_INT_STS_SUSPEND_WARN BIT(0)
+#define ASPEED_ESPI_OOB_RX_DMA_RB_SIZE 0x130
+#define ASPEED_ESPI_OOB_RX_DMA_RD_PTR 0x134
+#define ASPEED_ESPI_OOB_RX_DMA_RD_PTR_UPDATE BIT(31)
+#define ASPEED_ESPI_OOB_RX_DMA_WS_PTR 0x138
+#define ASPEED_ESPI_OOB_RX_DMA_WS_PTR_RECV_EN BIT(31)
+#define ASPEED_ESPI_OOB_RX_DMA_WS_PTR_SP_MASK GENMASK(27, 16)
+#define ASPEED_ESPI_OOB_RX_DMA_WS_PTR_SP_SHIFT 16
+#define ASPEED_ESPI_OOB_RX_DMA_WS_PTR_WP_MASK GENMASK(11, 0)
+#define ASPEED_ESPI_OOB_RX_DMA_WS_PTR_WP_SHIFT 0
+#define ASPEED_ESPI_OOB_TX_DMA_RB_SIZE 0x140
+#define ASPEED_ESPI_OOB_TX_DMA_RD_PTR 0x144
+#define ASPEED_ESPI_OOB_TX_DMA_RD_PTR_UPDATE BIT(31)
+#define ASPEED_ESPI_OOB_TX_DMA_WR_PTR 0x148
+#define ASPEED_ESPI_OOB_TX_DMA_WR_PTR_SEND_EN BIT(31)
+
+/* collect ASPEED_ESPI_INT_STS bits of eSPI channels for convenience */
+#define ASPEED_ESPI_INT_STS_PERIF_BITS \
+ (ASPEED_ESPI_INT_STS_PERIF_NP_TX_ABT | \
+ ASPEED_ESPI_INT_STS_PERIF_PC_TX_ABT | \
+ ASPEED_ESPI_INT_STS_PERIF_NP_RX_ABT | \
+ ASPEED_ESPI_INT_STS_PERIF_PC_RX_ABT | \
+ ASPEED_ESPI_INT_STS_PERIF_NP_TX_ERR | \
+ ASPEED_ESPI_INT_STS_PERIF_PC_TX_ERR | \
+ ASPEED_ESPI_INT_STS_PERIF_NP_TX_CMPLT | \
+ ASPEED_ESPI_INT_STS_PERIF_PC_TX_CMPLT | \
+ ASPEED_ESPI_INT_STS_PERIF_PC_RX_CMPLT)
+
+#define ASPEED_ESPI_INT_STS_VW_BITS \
+ (ASPEED_ESPI_INT_STS_VW_SYSEVT1 | \
+ ASPEED_ESPI_INT_STS_VW_GPIOEVT | \
+ ASPEED_ESPI_INT_STS_VW_SYSEVT)
+
+#define ASPEED_ESPI_INT_STS_OOB_BITS \
+ (ASPEED_ESPI_INT_STS_OOB_RX_TMOUT | \
+ ASPEED_ESPI_INT_STS_OOB_TX_ERR | \
+ ASPEED_ESPI_INT_STS_OOB_TX_ABT | \
+ ASPEED_ESPI_INT_STS_OOB_RX_ABT | \
+ ASPEED_ESPI_INT_STS_OOB_TX_CMPLT | \
+ ASPEED_ESPI_INT_STS_OOB_RX_CMPLT)
+
+#define ASPEED_ESPI_INT_STS_FLASH_BITS \
+ (ASPEED_ESPI_INT_STS_FLASH_TX_ERR | \
+ ASPEED_ESPI_INT_STS_FLASH_TX_ABT | \
+ ASPEED_ESPI_INT_STS_FLASH_RX_ABT | \
+ ASPEED_ESPI_INT_STS_FLASH_TX_CMPLT | \
+ ASPEED_ESPI_INT_STS_FLASH_RX_CMPLT)
+
+/* collect ASPEED_ESPI_INT_EN bits of eSPI channels for convenience */
+#define ASPEED_ESPI_INT_MASK \
+ (ASPEED_ESPI_HW_RESET | \
+ ASPEED_ESPI_VW_SYSEVT1 | \
+ ASPEED_ESPI_VW_SYSEVT)
+
+/*
+ * Setup Interrupt Type / Enable of System Event from Master
+ * T2 T1 T0
+ * 1) HOST_RST_WARN : Dual Edge 1 0 0
+ * 2) OOB_RST_WARN : Dual Edge 1 0 0
+ * 3) PLTRSTN : Dual Edge 1 0 0
+ */
+#define ASPEED_ESPI_SYSEVT_INT_T2_MASK \
+ (ASPEED_ESPI_SYSEVT_HOST_RST_WARN | \
+ ASPEED_ESPI_SYSEVT_OOB_RST_WARN | \
+ ASPEED_ESPI_SYSEVT_PLTRSTN)
+
+#define ASPEED_ESPI_SYSEVT1_INT_MASK \
+ (ASPEED_ESPI_SYSEVT1_INT_T0_MASK | \
+ ASPEED_ESPI_SYSEVT1_INT_T1_MASK | \
+ ASPEED_ESPI_SYSEVT1_INT_T2_MASK)
+
+#define ASPEED_ESPI_INT_EN_PERIF_BITS \
+ (ASPEED_ESPI_INT_EN_PERIF_NP_TX_ABT | \
+ ASPEED_ESPI_INT_EN_PERIF_PC_TX_ABT | \
+ ASPEED_ESPI_INT_EN_PERIF_NP_RX_ABT | \
+ ASPEED_ESPI_INT_EN_PERIF_PC_RX_ABT | \
+ ASPEED_ESPI_INT_EN_PERIF_NP_TX_ERR | \
+ ASPEED_ESPI_INT_EN_PERIF_PC_TX_ERR | \
+ ASPEED_ESPI_INT_EN_PERIF_NP_TX_CMPLT | \
+ ASPEED_ESPI_INT_EN_PERIF_PC_TX_CMPLT | \
+ ASPEED_ESPI_INT_EN_PERIF_PC_RX_CMPLT)
+
+#define ASPEED_ESPI_INT_EN_VW_BITS \
+ (ASPEED_ESPI_INT_EN_VW_SYSEVT1 | \
+ ASPEED_ESPI_INT_EN_VW_GPIOEVT | \
+ ASPEED_ESPI_INT_EN_VW_SYSEVT)
+
+#define ASPEED_ESPI_INT_EN_OOB_BITS \
+ (ASPEED_ESPI_INT_EN_OOB_RX_TMOUT | \
+ ASPEED_ESPI_INT_EN_OOB_TX_ERR | \
+ ASPEED_ESPI_INT_EN_OOB_TX_ABT | \
+ ASPEED_ESPI_INT_EN_OOB_RX_ABT | \
+ ASPEED_ESPI_INT_EN_OOB_TX_CMPLT | \
+ ASPEED_ESPI_INT_EN_OOB_RX_CMPLT)
+
+#define ASPEED_ESPI_INT_EN_FLASH_BITS \
+ (ASPEED_ESPI_INT_EN_FLASH_TX_ERR | \
+ ASPEED_ESPI_INT_EN_FLASH_TX_ABT | \
+ ASPEED_ESPI_INT_EN_FLASH_RX_ABT | \
+ ASPEED_ESPI_INT_EN_FLASH_TX_CMPLT | \
+ ASPEED_ESPI_INT_EN_FLASH_RX_CMPLT)
+
+#endif
diff --git a/drivers/soc/aspeed/aspeed-espi-oob.c b/drivers/soc/aspeed/aspeed-espi-oob.c
new file mode 100644
index 000000000000..ca9c362c897d
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-espi-oob.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 Aspeed Technology Inc.
+ */
+#include <linux/aspeed-espi-ioc.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/miscdevice.h>
+#include <linux/regmap.h>
+#include <linux/vmalloc.h>
+
+#include "aspeed-espi-ctrl.h"
+#include "aspeed-espi-oob.h"
+
+#define OOB_MDEV_NAME "aspeed-espi-oob"
+
+#define OOB_DMA_DESC_MAX_NUM 1024
+#define OOB_DMA_TX_DESC_CUST 0x04
+
+#define OOB_DEFAULT_RX_PACKET_LEN 0x1000
+
+/*
+ * Descriptor-based RX DMA handling
+ */
+static long aspeed_espi_oob_dma_desc_get_rx(struct file *fp,
+ struct aspeed_espi_ioc *ioc,
+ struct aspeed_espi_oob *espi_oob)
+{
+ struct aspeed_espi_ctrl *espi_ctrl = espi_oob->ctrl;
+ int rc = 0;
+ struct espi_comm_hdr *hdr;
+ struct oob_rx_dma_desc *d;
+ unsigned long flags;
+ u32 wptr, sptr;
+ u32 pkt_len;
+ u32 reg;
+ u8 *pkt;
+
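+ /*
+ * Read the RX ring write/software pointers; the descriptor at the
+ * software pointer is the next one for the driver to consume.
+ */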
+ regmap_read(espi_ctrl->map, ASPEED_ESPI_OOB_RX_DMA_WS_PTR, &reg);
+ wptr = (reg & ASPEED_ESPI_OOB_RX_DMA_WS_PTR_WP_MASK) >>
+ ASPEED_ESPI_OOB_RX_DMA_WS_PTR_WP_SHIFT;
+ sptr = (reg & ASPEED_ESPI_OOB_RX_DMA_WS_PTR_SP_MASK) >>
+ ASPEED_ESPI_OOB_RX_DMA_WS_PTR_SP_SHIFT;
+ d = &espi_oob->dma.rx_desc[sptr];
+ if (!d->dirty)
+ return -EFAULT;
+
+ pkt_len = (d->len ?: OOB_DEFAULT_RX_PACKET_LEN) + sizeof(struct espi_comm_hdr);
+ if (ioc->pkt_len < pkt_len)
+ return -EINVAL;
+
+ pkt = vmalloc(pkt_len);
+ if (!pkt)
+ return -ENOMEM;
+
+ hdr = (struct espi_comm_hdr *)pkt;
+ hdr->cyc = d->cyc;
+ hdr->tag = d->tag;
+ hdr->len_h = ESPI_LEN_HIGH(d->len);
+ hdr->len_l = ESPI_LEN_LOW(d->len);
+ memcpy(hdr + 1, espi_oob->dma.rx_virt + (PAGE_SIZE * sptr), pkt_len - sizeof(*hdr));
+ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
+ rc = -EFAULT;
+ goto free_n_out;
+ }
+
+ /* make current descriptor available again */
+ d->dirty = 0;
+ sptr = (sptr + 1) % espi_oob->dma.rx_desc_num;
+ wptr = (wptr + 1) % espi_oob->dma.rx_desc_num;
+ reg = ASPEED_ESPI_OOB_RX_DMA_WS_PTR_RECV_EN;
+ reg |= (wptr << ASPEED_ESPI_OOB_RX_DMA_WS_PTR_WP_SHIFT) &
+ ASPEED_ESPI_OOB_RX_DMA_WS_PTR_WP_MASK;
+ reg |= (sptr << ASPEED_ESPI_OOB_RX_DMA_WS_PTR_SP_SHIFT) &
+ ASPEED_ESPI_OOB_RX_DMA_WS_PTR_SP_MASK;
+
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_RX_DMA_WS_PTR, reg);
+ spin_lock_irqsave(&espi_oob->lock, flags);
+ espi_oob->rx_ready = espi_oob->dma.rx_desc[sptr].dirty;
+ spin_unlock_irqrestore(&espi_oob->lock, flags);
+
+free_n_out:
+ vfree(pkt);
+ return rc;
+}
+
+static long aspeed_espi_oob_get_rx(struct file *fp,
+ struct aspeed_espi_ioc *ioc,
+ struct aspeed_espi_oob *espi_oob)
+{
+ struct aspeed_espi_ctrl *espi_ctrl = espi_oob->ctrl;
+ int i, rc = 0;
+ struct espi_comm_hdr *hdr;
+ unsigned long flags;
+ u32 cyc, tag, len;
+ u32 pkt_len;
+ u32 reg;
+ u8 *pkt;
+
+ if (fp->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&espi_oob->get_rx_mtx))
+ return -EBUSY;
+
+ if (!espi_oob->rx_ready) {
+ rc = -ENODATA;
+ goto unlock_mtx_n_out;
+ }
+ } else {
+ mutex_lock(&espi_oob->get_rx_mtx);
+ if (!espi_oob->rx_ready) {
+ rc = wait_event_interruptible(espi_oob->wq, espi_oob->rx_ready);
+ if (rc == -ERESTARTSYS) {
+ rc = -EINTR;
+ goto unlock_mtx_n_out;
+ }
+ }
+ }
+ if (espi_oob->dma_mode && espi_ctrl->model->version != ASPEED_ESPI_AST2500) {
+ rc = aspeed_espi_oob_dma_desc_get_rx(fp, ioc, espi_oob);
+ goto unlock_mtx_n_out;
+ }
+
+ regmap_read(espi_ctrl->map, ASPEED_ESPI_OOB_RX_CTRL, &reg);
+ cyc = (reg & ASPEED_ESPI_OOB_RX_CTRL_CYC_MASK) >> ASPEED_ESPI_OOB_RX_CTRL_CYC_SHIFT;
+ tag = (reg & ASPEED_ESPI_OOB_RX_CTRL_TAG_MASK) >> ASPEED_ESPI_OOB_RX_CTRL_TAG_SHIFT;
+ len = (reg & ASPEED_ESPI_OOB_RX_CTRL_LEN_MASK) >> ASPEED_ESPI_OOB_RX_CTRL_LEN_SHIFT;
+
+ /*
+ * Calculate the length of the eSPI packet to be read from the HW buffer
+ * and copied to user space; a zero length field means the maximum payload.
+ */
+ pkt_len = (len ?: ASPEED_ESPI_PLD_LEN_MAX) + sizeof(*hdr);
+ if (ioc->pkt_len < pkt_len) {
+ rc = -EINVAL;
+ goto unlock_mtx_n_out;
+ }
+
+ pkt = vmalloc(pkt_len);
+ if (!pkt) {
+ rc = -ENOMEM;
+ goto unlock_mtx_n_out;
+ }
+
+ hdr = (struct espi_comm_hdr *)pkt;
+ hdr->cyc = cyc;
+ hdr->tag = tag;
+ hdr->len_h = ESPI_LEN_HIGH(len);
+ hdr->len_l = ESPI_LEN_LOW(len);
+ if (espi_oob->dma_mode) {
+ memcpy(hdr + 1, espi_oob->dma.rx_virt, pkt_len - sizeof(*hdr));
+ } else {
+ for (i = sizeof(*hdr); i < pkt_len; ++i) {
+ regmap_read(espi_ctrl->map, ASPEED_ESPI_OOB_RX_PORT, &reg);
+ pkt[i] = reg & 0xff;
+ }
+ }
+
+ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
+ rc = -EFAULT;
+ goto free_n_out;
+ }
+
+ regmap_write_bits(espi_ctrl->map, ASPEED_ESPI_OOB_RX_CTRL,
+ ASPEED_ESPI_OOB_RX_CTRL_PEND_SERV,
+ ASPEED_ESPI_OOB_RX_CTRL_PEND_SERV);
+
+ spin_lock_irqsave(&espi_oob->lock, flags);
+ espi_oob->rx_ready = 0;
+ spin_unlock_irqrestore(&espi_oob->lock, flags);
+
+free_n_out:
+ vfree(pkt);
+
+unlock_mtx_n_out:
+ mutex_unlock(&espi_oob->get_rx_mtx);
+ return rc;
+}
+
+static long aspeed_espi_oob_dma_desc_put_tx(struct file *fp,
+ struct aspeed_espi_ioc *ioc,
+ struct aspeed_espi_oob *espi_oob)
+{
+ struct aspeed_espi_ctrl *espi_ctrl = espi_oob->ctrl;
+ int rc = 0;
+ struct espi_comm_hdr *hdr;
+ struct oob_tx_dma_desc *d;
+ u32 rptr, wptr;
+ u8 *pkt;
+
+ pkt = vzalloc(ioc->pkt_len);
+ if (!pkt)
+ return -ENOMEM;
+
+ hdr = (struct espi_comm_hdr *)pkt;
+ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
+ rc = -EFAULT;
+ goto free_n_out;
+ }
+
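+ /*
+ * Latch the hardware read pointer and check for a full TX ring:
+ * one slot is always kept free, so (wptr + 1 == rptr) means full.
+ */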
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_TX_DMA_RD_PTR,
+ ASPEED_ESPI_OOB_TX_DMA_RD_PTR_UPDATE);
+ regmap_read(espi_ctrl->map, ASPEED_ESPI_OOB_TX_DMA_RD_PTR, &rptr);
+ regmap_read(espi_ctrl->map, ASPEED_ESPI_OOB_TX_DMA_WR_PTR, &wptr);
+ if (((wptr + 1) % espi_oob->dma.tx_desc_num) == rptr) {
+ rc = -EBUSY;
+ goto free_n_out;
+ }
+
+ d = &espi_oob->dma.tx_desc[wptr];
+ d->cyc = hdr->cyc;
+ d->tag = hdr->tag;
+ d->len = ESPI_LEN(hdr->len_h, hdr->len_l);
+ d->msg_type = OOB_DMA_TX_DESC_CUST;
+ memcpy(espi_oob->dma.tx_virt + (PAGE_SIZE * wptr), hdr + 1, ioc->pkt_len - sizeof(*hdr));
+
+ wptr = (wptr + 1) % espi_oob->dma.tx_desc_num;
+ wptr |= ASPEED_ESPI_OOB_TX_DMA_WR_PTR_SEND_EN;
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_TX_DMA_WR_PTR, wptr);
+
+free_n_out:
+ vfree(pkt);
+ return rc;
+}
+
+static long aspeed_espi_oob_put_tx(struct file *fp,
+ struct aspeed_espi_ioc *ioc,
+ struct aspeed_espi_oob *espi_oob)
+{
+ struct aspeed_espi_ctrl *espi_ctrl = espi_oob->ctrl;
+ int i, rc = 0;
+ struct espi_comm_hdr *hdr;
+ u32 reg, len;
+ u8 *pkt;
+
+ if (!mutex_trylock(&espi_oob->put_tx_mtx))
+ return -EBUSY;
+
+ if (espi_oob->dma_mode && espi_ctrl->model->version != ASPEED_ESPI_AST2500) {
+ rc = aspeed_espi_oob_dma_desc_put_tx(fp, ioc, espi_oob);
+ goto unlock_mtx_n_out;
+ }
+
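+ /* A TRIGGER bit still set means the previous transmission has not completed yet */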
+ regmap_read(espi_ctrl->map, ASPEED_ESPI_OOB_TX_CTRL, &reg);
+ if (reg & ASPEED_ESPI_OOB_TX_CTRL_TRIGGER) {
+ rc = -EBUSY;
+ goto unlock_mtx_n_out;
+ }
+
+ if (ioc->pkt_len > ASPEED_ESPI_PKT_LEN_MAX) {
+ rc = -EINVAL;
+ goto unlock_mtx_n_out;
+ }
+
+ pkt = vmalloc(ioc->pkt_len);
+ if (!pkt) {
+ rc = -ENOMEM;
+ goto unlock_mtx_n_out;
+ }
+
+ hdr = (struct espi_comm_hdr *)pkt;
+
+ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
+ rc = -EFAULT;
+ goto free_n_out;
+ }
+
+ if (espi_oob->dma_mode) {
+ memcpy(espi_oob->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
+ } else {
+ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_TX_PORT, pkt[i]);
+ }
+
+ len = ESPI_LEN(hdr->len_h, hdr->len_l);
+ reg = (hdr->cyc << ASPEED_ESPI_OOB_TX_CTRL_CYC_SHIFT) & ASPEED_ESPI_OOB_TX_CTRL_CYC_MASK;
+ reg |= (hdr->tag << ASPEED_ESPI_OOB_TX_CTRL_TAG_SHIFT) & ASPEED_ESPI_OOB_TX_CTRL_TAG_MASK;
+ reg |= (len << ASPEED_ESPI_OOB_TX_CTRL_LEN_SHIFT) & ASPEED_ESPI_OOB_TX_CTRL_LEN_MASK;
+ reg |= ASPEED_ESPI_OOB_TX_CTRL_TRIGGER;
+
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_TX_CTRL, reg);
+
+free_n_out:
+ vfree(pkt);
+
+unlock_mtx_n_out:
+ mutex_unlock(&espi_oob->put_tx_mtx);
+ return rc;
+}
+
+static long aspeed_espi_oob_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ struct aspeed_espi_oob *espi_oob = container_of(fp->private_data,
+ struct aspeed_espi_oob,
+ mdev);
+ struct aspeed_espi_ioc ioc;
+
+ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
+ return -EFAULT;
+
+ if (ioc.pkt_len > ASPEED_ESPI_PKT_LEN_MAX)
+ return -EINVAL;
+
+ switch (cmd) {
+ case ASPEED_ESPI_OOB_GET_RX:
+ return aspeed_espi_oob_get_rx(fp, &ioc, espi_oob);
+ case ASPEED_ESPI_OOB_PUT_TX:
+ return aspeed_espi_oob_put_tx(fp, &ioc, espi_oob);
+ }
+
+ return -ENOTTY;
+}
+
+void aspeed_espi_oob_event(u32 sts, struct aspeed_espi_oob *espi_oob)
+{
+ unsigned long flags;
+
+ if (sts & ASPEED_ESPI_INT_STS_OOB_RX_CMPLT) {
+ spin_lock_irqsave(&espi_oob->lock, flags);
+ espi_oob->rx_ready = 1;
+ spin_unlock_irqrestore(&espi_oob->lock, flags);
+ wake_up_interruptible(&espi_oob->wq);
+ }
+}
+
+static void aspeed_espi_oob_dma_init(struct aspeed_espi_oob *espi_oob)
+{
+ struct aspeed_espi_ctrl *espi_ctrl = espi_oob->ctrl;
+ struct aspeed_espi_oob_dma *dma = &espi_oob->dma;
+ int i;
+
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_OOB_TX_DMA_EN | ASPEED_ESPI_CTRL_OOB_RX_DMA_EN,
+ ASPEED_ESPI_CTRL_OOB_TX_DMA_EN | ASPEED_ESPI_CTRL_OOB_RX_DMA_EN);
+
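+ /*
+ * The AST2500 uses a single DMA buffer per direction; later chips
+ * use the descriptor rings initialized below.
+ */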
+ if (espi_ctrl->model->version == ASPEED_ESPI_AST2500) {
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_TX_DMA, dma->tx_addr);
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_RX_DMA, dma->rx_addr);
+ } else {
+ for (i = 0; i < dma->tx_desc_num; ++i)
+ dma->tx_desc[i].data_addr = dma->tx_addr + (i * PAGE_SIZE);
+
+ for (i = 0; i < dma->rx_desc_num; ++i) {
+ dma->rx_desc[i].data_addr = dma->rx_addr + (i * PAGE_SIZE);
+ dma->rx_desc[i].dirty = 0;
+ }
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_TX_DMA, dma->tx_desc_addr);
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_TX_DMA_RB_SIZE,
+ dma->tx_desc_num);
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_RX_DMA, dma->rx_desc_addr);
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_RX_DMA_RB_SIZE,
+ dma->rx_desc_num);
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_OOB_RX_DMA_WS_PTR,
+ ASPEED_ESPI_OOB_RX_DMA_WS_PTR_RECV_EN,
+ ASPEED_ESPI_OOB_RX_DMA_WS_PTR_RECV_EN);
+ }
+}
+
+void aspeed_espi_oob_enable(struct aspeed_espi_oob *espi_oob)
+{
+ struct aspeed_espi_ctrl *espi_ctrl = espi_oob->ctrl;
+
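+ /* Clear software-ready and the RX software reset bit before reprogramming the channel */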
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_OOB_SW_RDY | ASPEED_ESPI_CTRL_OOB_RX_SW_RST, 0);
+
+ if (espi_oob->dma_mode)
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_OOB_TX_DMA_EN | ASPEED_ESPI_CTRL_OOB_RX_DMA_EN,
+ 0);
+ else
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_RX_CTRL,
+ ASPEED_ESPI_OOB_RX_CTRL_PEND_SERV);
+
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_CTRL, ASPEED_ESPI_CTRL_OOB_RX_SW_RST,
+ ASPEED_ESPI_CTRL_OOB_RX_SW_RST);
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_OOB_RX_CTRL, ASPEED_ESPI_OOB_RX_CTRL_PEND_SERV);
+
+ if (espi_oob->dma_mode)
+ aspeed_espi_oob_dma_init(espi_oob);
+
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_INT_STS, ASPEED_ESPI_INT_STS_OOB_BITS);
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_INT_EN,
+ ASPEED_ESPI_INT_EN_OOB_BITS,
+ ASPEED_ESPI_INT_EN_OOB_BITS);
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_OOB_SW_RDY,
+ ASPEED_ESPI_CTRL_OOB_SW_RDY);
+}
+
+static const struct file_operations aspeed_espi_oob_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = aspeed_espi_oob_ioctl,
+};
+
+static int aspeed_espi_oob_dma_alloc(struct device *dev, struct aspeed_espi_oob *espi_oob,
+ u32 version)
+{
+ struct aspeed_espi_oob_dma *dma = &espi_oob->dma;
+
+ if (version != ASPEED_ESPI_AST2500) {
+ of_property_read_u32(dev->of_node, "oob,dma-tx-desc-num", &dma->tx_desc_num);
+ of_property_read_u32(dev->of_node, "oob,dma-rx-desc-num", &dma->rx_desc_num);
+
+ if (!dma->tx_desc_num || !dma->rx_desc_num) {
+ dev_err(dev, "invalid (zero) number of DMA descriptors\n");
+ return -EINVAL;
+ }
+
+ if (dma->tx_desc_num >= OOB_DMA_DESC_MAX_NUM ||
+ dma->rx_desc_num >= OOB_DMA_DESC_MAX_NUM) {
+ dev_err(dev, "too many DMA descriptors requested\n");
+ return -EINVAL;
+ }
+
+ dma->tx_desc = dmam_alloc_coherent(dev, sizeof(*dma->tx_desc) * dma->tx_desc_num,
+ &dma->tx_desc_addr, GFP_KERNEL);
+ if (!dma->tx_desc) {
+ dev_err(dev, "cannot allocate DMA TX descriptor\n");
+ return -ENOMEM;
+ }
+
+ dma->rx_desc = dmam_alloc_coherent(dev, sizeof(*dma->rx_desc) * dma->rx_desc_num,
+ &dma->rx_desc_addr, GFP_KERNEL);
+ if (!dma->rx_desc) {
+ dev_err(dev, "cannot allocate DMA RX descriptor\n");
+ return -ENOMEM;
+ }
+ }
+ /*
+ * The DMA descriptors are consumed as a circular queue, so one dummy
+ * slot is reserved to detect the queue-full condition. On the AST2500,
+ * which has no descriptor support, the queue ends up with a single
+ * slot here.
+ */
+ dma->tx_desc_num += 1;
+ dma->rx_desc_num += 1;
+ dma->tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * dma->tx_desc_num, &dma->tx_addr,
+ GFP_KERNEL);
+ if (!dma->tx_virt)
+ return -ENOMEM;
+
+ dma->rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * dma->rx_desc_num, &dma->rx_addr,
+ GFP_KERNEL);
+ if (!dma->rx_virt)
+ return -ENOMEM;
+ return 0;
+}
+
+void *aspeed_espi_oob_alloc(struct device *dev, struct aspeed_espi_ctrl *espi_ctrl)
+{
+ struct aspeed_espi_oob *espi_oob =
+ devm_kzalloc(dev, sizeof(struct aspeed_espi_oob), GFP_KERNEL);
+ int rc = 0;
+
+ if (!espi_oob)
+ return ERR_PTR(-ENOMEM);
+
+ espi_oob->ctrl = espi_ctrl;
+ init_waitqueue_head(&espi_oob->wq);
+ spin_lock_init(&espi_oob->lock);
+ mutex_init(&espi_oob->put_tx_mtx);
+ mutex_init(&espi_oob->get_rx_mtx);
+ if (of_property_read_bool(dev->of_node, "oob,dma-mode")) {
+ rc = aspeed_espi_oob_dma_alloc(dev, espi_oob, espi_ctrl->model->version);
+ if (rc)
+ return ERR_PTR(rc);
+ espi_oob->dma_mode = 1;
+ }
+
+ espi_oob->mdev.parent = dev;
+ espi_oob->mdev.minor = MISC_DYNAMIC_MINOR;
+ espi_oob->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s", OOB_MDEV_NAME);
+ espi_oob->mdev.fops = &aspeed_espi_oob_fops;
+ rc = misc_register(&espi_oob->mdev);
+ if (rc) {
+ dev_err(dev, "cannot register device\n");
+ return ERR_PTR(rc);
+ }
+
+ aspeed_espi_oob_enable(espi_oob);
+ return espi_oob;
+}
+
+void aspeed_espi_oob_free(struct device *dev, struct aspeed_espi_oob *espi_oob)
+{
+ mutex_destroy(&espi_oob->put_tx_mtx);
+ mutex_destroy(&espi_oob->get_rx_mtx);
+ misc_deregister(&espi_oob->mdev);
+}
diff --git a/drivers/soc/aspeed/aspeed-espi-oob.h b/drivers/soc/aspeed/aspeed-espi-oob.h
new file mode 100644
index 000000000000..ee5fb3d09770
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-espi-oob.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 Aspeed Technology Inc.
+ */
+#ifndef _ASPEED_ESPI_OOB_H_
+#define _ASPEED_ESPI_OOB_H_
+
+struct oob_tx_dma_desc {
+ u32 data_addr;
+ u8 cyc;
+ u16 tag : 4;
+ u16 len : 12;
+ u8 msg_type : 3;
+ u8 raz0 : 1;
+ u8 pec : 1;
+ u8 int_en : 1;
+ u8 pause : 1;
+ u8 raz1 : 1;
+ u32 raz2;
+ u32 raz3;
+} __packed;
+
+struct oob_rx_dma_desc {
+ u32 data_addr;
+ u8 cyc;
+ u16 tag : 4;
+ u16 len : 12;
+ u8 raz : 7;
+ u8 dirty : 1;
+} __packed;
+
+struct aspeed_espi_oob_dma {
+ u32 tx_desc_num;
+ u32 rx_desc_num;
+
+ struct oob_tx_dma_desc *tx_desc;
+ dma_addr_t tx_desc_addr;
+
+ struct oob_rx_dma_desc *rx_desc;
+ dma_addr_t rx_desc_addr;
+
+ void *tx_virt;
+ dma_addr_t tx_addr;
+
+ void *rx_virt;
+ dma_addr_t rx_addr;
+};
+
+struct aspeed_espi_oob {
+ u32 dma_mode;
+ struct aspeed_espi_oob_dma dma;
+
+ u32 rx_ready;
+ wait_queue_head_t wq;
+ /* Locks RX resources and allows only one receive at a time */
+ struct mutex get_rx_mtx;
+ /* Locks TX resources and allows only one transmit at a time */
+ struct mutex put_tx_mtx;
+ /* Lock to synchronize receive in irq context */
+ spinlock_t lock;
+
+ struct miscdevice mdev;
+ struct aspeed_espi_ctrl *ctrl;
+};
+
+void aspeed_espi_oob_event(u32 sts, struct aspeed_espi_oob *espi_oob);
+void aspeed_espi_oob_enable(struct aspeed_espi_oob *espi_oob);
+void *aspeed_espi_oob_alloc(struct device *dev, struct aspeed_espi_ctrl *espi_ctrl);
+void aspeed_espi_oob_free(struct device *dev, struct aspeed_espi_oob *espi_oob);
+
+#endif
diff --git a/drivers/soc/aspeed/aspeed-espi-slave.c b/drivers/soc/aspeed/aspeed-espi-slave.c
new file mode 100644
index 000000000000..d638b2c7351a
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-espi-slave.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2015-2019, Intel Corporation.
+
+#include <linux/aspeed-espi-ioc.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sched/signal.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include "aspeed-espi-ctrl.h"
+#include "aspeed-espi-oob.h"
+#include "aspeed-espi-vw.h"
+
+struct aspeed_espi {
+ struct regmap *map;
+ struct clk *clk;
+ struct device *dev;
+ struct reset_control *reset;
+ int irq;
+ int rst_irq;
+
+ /* for PLTRST_N signal monitoring interface */
+ struct miscdevice pltrstn_miscdev;
+ spinlock_t pltrstn_lock; /* for PLTRST_N signal sampling */
+ wait_queue_head_t pltrstn_waitq;
+ char pltrstn;
+ bool pltrstn_in_avail;
+ struct aspeed_espi_ctrl *espi_ctrl;
+
+
+static void aspeed_espi_sys_event(struct aspeed_espi *priv)
+{
+ u32 sts, evt;
+
+ regmap_read(priv->map, ASPEED_ESPI_SYSEVT_INT_STS, &sts);
+ regmap_read(priv->map, ASPEED_ESPI_SYSEVT, &evt);
+
+ dev_dbg(priv->dev, "sys: sts = %08x, evt = %08x\n", sts, evt);
+
+ if (!(evt & ASPEED_ESPI_SYSEVT_SLAVE_BOOT_STATUS)) {
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT,
+ evt | ASPEED_ESPI_SYSEVT_SLAVE_BOOT_STATUS |
+ ASPEED_ESPI_SYSEVT_SLAVE_BOOT_DONE);
+ dev_dbg(priv->dev, "Setting espi slave boot done\n");
+ }
+ if (sts & ASPEED_ESPI_SYSEVT_HOST_RST_WARN) {
+ if (evt & ASPEED_ESPI_SYSEVT_HOST_RST_WARN)
+ regmap_write_bits(priv->map, ASPEED_ESPI_SYSEVT,
+ ASPEED_ESPI_SYSEVT_HOST_RST_ACK,
+ ASPEED_ESPI_SYSEVT_HOST_RST_ACK);
+ else
+ regmap_write_bits(priv->map, ASPEED_ESPI_SYSEVT,
+ ASPEED_ESPI_SYSEVT_HOST_RST_ACK, 0);
+ dev_dbg(priv->dev, "SYSEVT_HOST_RST_WARN: acked\n");
+ }
+ if (sts & ASPEED_ESPI_SYSEVT_OOB_RST_WARN) {
+ if (evt & ASPEED_ESPI_SYSEVT_OOB_RST_WARN)
+ regmap_write_bits(priv->map, ASPEED_ESPI_SYSEVT,
+ ASPEED_ESPI_SYSEVT_OOB_RST_ACK,
+ ASPEED_ESPI_SYSEVT_OOB_RST_ACK);
+ else
+ regmap_write_bits(priv->map, ASPEED_ESPI_SYSEVT,
+ ASPEED_ESPI_SYSEVT_OOB_RST_ACK, 0);
+ dev_dbg(priv->dev, "SYSEVT_OOB_RST_WARN: acked\n");
+ }
+ if (sts & ASPEED_ESPI_SYSEVT_PLTRSTN || priv->pltrstn == 'U') {
+ spin_lock(&priv->pltrstn_lock);
+ priv->pltrstn = (evt & ASPEED_ESPI_SYSEVT_PLTRSTN) ? '1' : '0';
+ priv->pltrstn_in_avail = true;
+ spin_unlock(&priv->pltrstn_lock);
+ wake_up_interruptible(&priv->pltrstn_waitq);
+ dev_dbg(priv->dev, "SYSEVT_PLTRSTN: %c\n", priv->pltrstn);
+ }
+
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT_INT_STS, sts);
+}
+
+static void aspeed_espi_sys_event1(struct aspeed_espi *priv)
+{
+ u32 sts, evt;
+
+ regmap_read(priv->map, ASPEED_ESPI_SYSEVT1_INT_STS, &sts);
+ regmap_read(priv->map, ASPEED_ESPI_SYSEVT1, &evt);
+
+ dev_dbg(priv->dev, "sys event1: sts = %08x, evt = %08x\n", sts, evt);
+
+ if (sts & ASPEED_ESPI_SYSEVT1_SUSPEND_WARN) {
+ if (evt & ASPEED_ESPI_SYSEVT1_SUSPEND_WARN)
+ regmap_write_bits(priv->map, ASPEED_ESPI_SYSEVT1,
+ ASPEED_ESPI_SYSEVT1_SUSPEND_ACK,
+ ASPEED_ESPI_SYSEVT1_SUSPEND_ACK);
+ else
+ regmap_write_bits(priv->map, ASPEED_ESPI_SYSEVT1,
+ ASPEED_ESPI_SYSEVT1_SUSPEND_ACK, 0);
+ dev_dbg(priv->dev, "SYSEVT1_SUS_WARN: acked\n");
+ }
+
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT1_INT_STS, sts);
+}
+
+static void aspeed_espi_boot_ack(struct aspeed_espi *priv)
+{
+ u32 evt;
+
+ regmap_read(priv->map, ASPEED_ESPI_SYSEVT, &evt);
+ if (!(evt & ASPEED_ESPI_SYSEVT_SLAVE_BOOT_STATUS)) {
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT,
+ evt | ASPEED_ESPI_SYSEVT_SLAVE_BOOT_STATUS |
+ ASPEED_ESPI_SYSEVT_SLAVE_BOOT_DONE);
+ dev_dbg(priv->dev, "Setting espi slave boot done\n");
+ }
+
+ regmap_read(priv->map, ASPEED_ESPI_SYSEVT1, &evt);
+ if (evt & ASPEED_ESPI_SYSEVT1_SUSPEND_WARN &&
+ !(evt & ASPEED_ESPI_SYSEVT1_SUSPEND_ACK)) {
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT1,
+ evt | ASPEED_ESPI_SYSEVT1_SUSPEND_ACK);
+ dev_dbg(priv->dev, "Boot SYSEVT1_SUS_WARN: acked\n");
+ }
+}
+
+static irqreturn_t aspeed_espi_irq(int irq, void *arg)
+{
+ struct aspeed_espi *priv = arg;
+ u32 sts, sts_handled = 0;
+
+ regmap_read(priv->map, ASPEED_ESPI_INT_STS, &sts);
+
+ dev_dbg(priv->dev, "INT_STS: 0x%08x\n", sts);
+
+ if (sts & ASPEED_ESPI_VW_SYSEVT) {
+ aspeed_espi_sys_event(priv);
+ sts_handled |= ASPEED_ESPI_VW_SYSEVT;
+ }
+ if (sts & ASPEED_ESPI_VW_SYSEVT1) {
+ aspeed_espi_sys_event1(priv);
+ sts_handled |= ASPEED_ESPI_VW_SYSEVT1;
+ }
+ if (sts & ASPEED_ESPI_INT_STS_OOB_BITS) {
+ aspeed_espi_oob_event(sts, priv->espi_ctrl->oob);
+ regmap_write(priv->map, ASPEED_ESPI_INT_STS, sts & ASPEED_ESPI_INT_STS_OOB_BITS);
+ }
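+ /*
+ * On an eSPI hardware reset event, toggle the controller SW reset
+ * (unless a dedicated reset IRQ handles it), mark the OOB channel
+ * ready again and re-enable the OOB and VW channels.
+ */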
+ if (sts & ASPEED_ESPI_HW_RESET) {
+ if (priv->rst_irq < 0) {
+ regmap_write_bits(priv->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_SW_RESET, 0);
+ regmap_write_bits(priv->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_SW_RESET,
+ ASPEED_ESPI_CTRL_SW_RESET);
+ }
+
+ regmap_write_bits(priv->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_OOB_CHRDY,
+ ASPEED_ESPI_CTRL_OOB_CHRDY);
+ aspeed_espi_boot_ack(priv);
+ sts_handled |= ASPEED_ESPI_HW_RESET;
+ aspeed_espi_oob_enable(priv->espi_ctrl->oob);
+ aspeed_espi_vw_enable(priv->espi_ctrl->vw);
+ }
+
+ regmap_write(priv->map, ASPEED_ESPI_INT_STS, sts);
+
+ return sts != sts_handled ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static void aspeed_espi_config_irq(struct aspeed_espi *priv)
+{
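+ /* Program the system event interrupt types (T0/T1/T2) and unmask the top-level sources */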
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT_INT_T0, ASPEED_ESPI_SYSEVT_INT_T0_MASK);
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT_INT_T1, ASPEED_ESPI_SYSEVT_INT_T1_MASK);
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT_INT_T2, ASPEED_ESPI_SYSEVT_INT_T2_MASK);
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT_INT_EN, 0xFFFFFFFF);
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT1_INT_T0, ASPEED_ESPI_SYSEVT1_INT_T0_MASK);
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT1_INT_T1, ASPEED_ESPI_SYSEVT1_INT_T1_MASK);
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT1_INT_T2, ASPEED_ESPI_SYSEVT1_INT_T2_MASK);
+ regmap_write(priv->map, ASPEED_ESPI_SYSEVT1_INT_EN, ASPEED_ESPI_SYSEVT1_INT_MASK);
+ regmap_write_bits(priv->map, ASPEED_ESPI_INT_EN, ASPEED_ESPI_INT_MASK,
+ ASPEED_ESPI_INT_MASK);
+}
+
+static irqreturn_t aspeed_espi_reset_isr(int irq, void *arg)
+{
+ struct aspeed_espi *priv = arg;
+
+ reset_control_assert(priv->reset);
+ reset_control_deassert(priv->reset);
+
+ regmap_write_bits(priv->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_SW_RESET, 0);
+ regmap_write_bits(priv->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_SW_RESET, ASPEED_ESPI_CTRL_SW_RESET);
+
+ regmap_write_bits(priv->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_OOB_CHRDY, 0);
+
+ aspeed_espi_config_irq(priv);
+
+ return IRQ_HANDLED;
+}
+
+static inline struct aspeed_espi *to_aspeed_espi(struct file *filp)
+{
+ return container_of(filp->private_data, struct aspeed_espi,
+ pltrstn_miscdev);
+}
+
+static int aspeed_espi_pltrstn_open(struct inode *inode, struct file *filp)
+{
+ struct aspeed_espi *priv = to_aspeed_espi(filp);
+
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ return -EACCES;
+ priv->pltrstn_in_avail = true; /* return the current state on the first read after open */
+
+ return 0;
+}
+
+static ssize_t aspeed_espi_pltrstn_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct aspeed_espi *priv = to_aspeed_espi(filp);
+ DECLARE_WAITQUEUE(wait, current);
+ char data, old_sample;
+ int ret = 0;
+
+ spin_lock_irq(&priv->pltrstn_lock);
+
+ if (filp->f_flags & O_NONBLOCK) {
+ if (!priv->pltrstn_in_avail) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+ data = priv->pltrstn;
+ priv->pltrstn_in_avail = false;
+ } else {
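+ /* Blocking read: sleep until PLTRST_N changes from the value sampled at entry */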
+ add_wait_queue(&priv->pltrstn_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ old_sample = priv->pltrstn;
+
+ do {
+ if (old_sample != priv->pltrstn) {
+ data = priv->pltrstn;
+ priv->pltrstn_in_avail = false;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ } else {
+ spin_unlock_irq(&priv->pltrstn_lock);
+ schedule();
+ spin_lock_irq(&priv->pltrstn_lock);
+ }
+ } while (!ret);
+
+ remove_wait_queue(&priv->pltrstn_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ }
+out_unlock:
+ spin_unlock_irq(&priv->pltrstn_lock);
+
+ if (ret)
+ return ret;
+
+ ret = put_user(data, buf);
+ if (!ret)
+ ret = sizeof(data);
+
+ return ret;
+}
+
+static unsigned int aspeed_espi_pltrstn_poll(struct file *file,
+ poll_table *wait)
+{
+ struct aspeed_espi *priv = to_aspeed_espi(file);
+ unsigned int mask = 0;
+
+ poll_wait(file, &priv->pltrstn_waitq, wait);
+ if (priv->pltrstn_in_avail)
+ mask |= POLLIN;
+ return mask;
+}
+
+static const struct file_operations aspeed_espi_pltrstn_fops = {
+ .owner = THIS_MODULE,
+ .open = aspeed_espi_pltrstn_open,
+ .read = aspeed_espi_pltrstn_read,
+ .poll = aspeed_espi_pltrstn_poll,
+};
+
+static const struct regmap_config aspeed_espi_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x200,
+};
+
+static int aspeed_espi_probe(struct platform_device *pdev)
+{
+ struct aspeed_espi_ctrl *espi_ctrl;
+ struct aspeed_espi *priv;
+ struct resource *res;
+ void __iomem *regs;
+ u32 ctrl;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ espi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*espi_ctrl), GFP_KERNEL);
+ if (!espi_ctrl)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, priv);
+ priv->dev = &pdev->dev;
+ priv->espi_ctrl = espi_ctrl;
+ espi_ctrl->model = of_device_get_match_data(&pdev->dev);
+ priv->map = devm_regmap_init_mmio(&pdev->dev, regs,
+ &aspeed_espi_regmap_cfg);
+ if (IS_ERR(priv->map))
+ return PTR_ERR(priv->map);
+ espi_ctrl->map = priv->map;
+ aspeed_espi_config_irq(priv);
+ espi_ctrl->oob = aspeed_espi_oob_alloc(&pdev->dev, espi_ctrl);
+ if (IS_ERR(espi_ctrl->oob)) {
+ dev_err(&pdev->dev, "Failed to allocate espi out-of-band channel\n");
+ return PTR_ERR(espi_ctrl->oob);
+ }
+ espi_ctrl->vw = aspeed_espi_vw_init(&pdev->dev, espi_ctrl);
+ if (IS_ERR(espi_ctrl->vw)) {
+ dev_err(&pdev->dev, "Failed to allocate espi virtual wire channel\n");
+ return PTR_ERR(espi_ctrl->vw);
+ }
+
+ spin_lock_init(&priv->pltrstn_lock);
+ init_waitqueue_head(&priv->pltrstn_waitq);
+ priv->pltrstn = 'U'; /* not yet reported by the master */
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_espi_irq, 0,
+ "aspeed-espi-irq", priv);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "aspeed,ast2600-espi-slave")) {
+ priv->rst_irq = platform_get_irq(pdev, 1);
+ if (priv->rst_irq < 0)
+ return priv->rst_irq;
+
+ ret = devm_request_irq(&pdev->dev, priv->rst_irq,
+ aspeed_espi_reset_isr, 0,
+ "aspeed-espi-rst-irq", priv);
+ if (ret)
+ return ret;
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+ } else {
+ priv->rst_irq = -ENOTSUPP;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "couldn't get clock\n");
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't enable clock\n");
+ return ret;
+ }
+
+ /*
+ * We check that the regmap works on this very first access, but as this
+ * is an MMIO-backed regmap, subsequent regmap access is not going to
+ * fail and we skip error checks from this point.
+ */
+ ret = regmap_read(priv->map, ASPEED_ESPI_CTRL, &ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read ctrl register\n");
+ goto err_clk_disable_out;
+ }
+ regmap_write(priv->map, ASPEED_ESPI_CTRL,
+ ctrl | ASPEED_ESPI_CTRL_OOB_CHRDY);
+
+ priv->pltrstn_miscdev.minor = MISC_DYNAMIC_MINOR;
+ priv->pltrstn_miscdev.name = "espi-pltrstn";
+ priv->pltrstn_miscdev.fops = &aspeed_espi_pltrstn_fops;
+ priv->pltrstn_miscdev.parent = &pdev->dev;
+
+ ret = misc_register(&priv->pltrstn_miscdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register device\n");
+ goto err_clk_disable_out;
+ }
+
+ aspeed_espi_boot_ack(priv);
+
+ dev_info(&pdev->dev, "eSPI registered, irq %d\n", priv->irq);
+
+ return 0;
+
+err_clk_disable_out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int aspeed_espi_remove(struct platform_device *pdev)
+{
+ struct aspeed_espi *priv = dev_get_drvdata(&pdev->dev);
+
+ aspeed_espi_oob_free(priv->dev, priv->espi_ctrl->oob);
+ aspeed_espi_vw_fini(priv->dev, priv->espi_ctrl->vw);
+ misc_deregister(&priv->pltrstn_miscdev);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static const struct aspeed_espi_model ast2600_model = {
+ .version = ASPEED_ESPI_AST2600,
+};
+
+static const struct of_device_id of_espi_match_table[] = {
+ { .compatible = "aspeed,ast2500-espi-slave" },
+ { .compatible = "aspeed,ast2600-espi-slave",
+ .data = &ast2600_model},
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_espi_match_table);
+
+static struct platform_driver aspeed_espi_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(of_espi_match_table),
+ },
+ .probe = aspeed_espi_probe,
+ .remove = aspeed_espi_remove,
+};
+module_platform_driver(aspeed_espi_driver);
+
+MODULE_AUTHOR("Haiyue Wang <haiyue.wang@linux.intel.com>");
+MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
+MODULE_DESCRIPTION("Aspeed eSPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/aspeed/aspeed-espi-vw.c b/drivers/soc/aspeed/aspeed-espi-vw.c
new file mode 100644
index 000000000000..81bfb7f59274
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-espi-vw.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 ASPEED Technology Inc.
+ */
+#include <linux/aspeed-espi-ioc.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/regmap.h>
+#include <linux/uaccess.h>
+
+#include "aspeed-espi-ctrl.h"
+#include "aspeed-espi-vw.h"
+
+#define VW_MDEV_NAME "aspeed-espi-vw"
+
+static long aspeed_espi_vw_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ struct aspeed_espi_vw *espi_vw = container_of(fp->private_data,
+ struct aspeed_espi_vw,
+ mdev);
+ struct aspeed_espi_ctrl *espi_ctrl = espi_vw->ctrl;
+ u32 val = 0;
+
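+ /* Get/put the raw virtual wire GPIO value register on behalf of user space */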
+ switch (cmd) {
+ case ASPEED_ESPI_VW_GET_GPIO_VAL:
+ regmap_read(espi_ctrl->map, ASPEED_ESPI_VW_GPIO_VAL, &val);
+ if (put_user(val, (uint32_t __user *)arg))
+ return -EFAULT;
+ break;
+
+ case ASPEED_ESPI_VW_PUT_GPIO_VAL:
+ if (get_user(val, (uint32_t __user *)arg))
+ return -EFAULT;
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_VW_GPIO_VAL, val);
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+void aspeed_espi_vw_enable(struct aspeed_espi_vw *espi_vw)
+{
+ struct aspeed_espi_ctrl *espi_ctrl = espi_vw->ctrl;
+
+ regmap_write(espi_ctrl->map, ASPEED_ESPI_INT_STS,
+ ASPEED_ESPI_INT_STS_VW_BITS);
+
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_INT_EN,
+ ASPEED_ESPI_INT_EN_VW_BITS,
+ ASPEED_ESPI_INT_EN_VW_BITS);
+
+ regmap_update_bits(espi_ctrl->map, ASPEED_ESPI_CTRL,
+ ASPEED_ESPI_CTRL_VW_SW_RDY,
+ ASPEED_ESPI_CTRL_VW_SW_RDY);
+}
+
+static const struct file_operations aspeed_espi_vw_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = aspeed_espi_vw_ioctl,
+};
+
+void *aspeed_espi_vw_init(struct device *dev, struct aspeed_espi_ctrl *espi_ctrl)
+{
+ int rc;
+ struct aspeed_espi_vw *espi_vw;
+
+ espi_vw = devm_kzalloc(dev, sizeof(*espi_vw), GFP_KERNEL);
+ if (!espi_vw)
+ return ERR_PTR(-ENOMEM);
+
+ espi_vw->ctrl = espi_ctrl;
+
+ espi_vw->mdev.parent = dev;
+ espi_vw->mdev.minor = MISC_DYNAMIC_MINOR;
+ espi_vw->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s", VW_MDEV_NAME);
+ espi_vw->mdev.fops = &aspeed_espi_vw_fops;
+ rc = misc_register(&espi_vw->mdev);
+ if (rc) {
+ dev_err(dev, "cannot register device\n");
+ return ERR_PTR(rc);
+ }
+
+ aspeed_espi_vw_enable(espi_vw);
+
+ return espi_vw;
+}
+
+void aspeed_espi_vw_fini(struct device *dev, struct aspeed_espi_vw *espi_vw)
+{
+ misc_deregister(&espi_vw->mdev);
+}
diff --git a/drivers/soc/aspeed/aspeed-espi-vw.h b/drivers/soc/aspeed/aspeed-espi-vw.h
new file mode 100644
index 000000000000..4532e4e13d43
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-espi-vw.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2022 ASPEED Technology Inc.
+ */
+#ifndef _ASPEED_ESPI_VW_H_
+#define _ASPEED_ESPI_VW_H_
+
+struct aspeed_espi_vw {
+ int irq;
+ int irq_reset;
+
+ struct miscdevice mdev;
+ struct aspeed_espi_ctrl *ctrl;
+};
+
+void aspeed_espi_vw_enable(struct aspeed_espi_vw *espi_vw);
+void *aspeed_espi_vw_init(struct device *dev, struct aspeed_espi_ctrl *espi_ctrl);
+void aspeed_espi_vw_fini(struct device *dev, struct aspeed_espi_vw *espi_vw);
+
+#endif
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 258894ed234b..7fda4a987d8c 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -329,6 +329,7 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
err:
clk_disable_unprepare(lpc_ctrl->clk);
+
return rc;
}
diff --git a/drivers/soc/aspeed/aspeed-lpc-mbox.c b/drivers/soc/aspeed/aspeed-lpc-mbox.c
new file mode 100644
index 000000000000..7941792abacb
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-lpc-mbox.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2017 IBM Corporation
+// TODO: Rewrite this driver
+
+#include <linux/aspeed-lpc-mbox.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+
+#define DEVICE_NAME "aspeed-mbox"
+
+#define ASPEED_MBOX_CTRL_RECV BIT(7)
+#define ASPEED_MBOX_CTRL_MASK BIT(1)
+#define ASPEED_MBOX_CTRL_SEND BIT(0)
+
+#define AST2600_MBOX_NUM_REGS 32
+#define AST2600_MBOX_DATA_0 0x00
+#define AST2600_MBOX_STATUS_0 0x80
+#define AST2600_MBOX_BMC_CTRL 0x90
+#define AST2600_MBOX_INTERRUPT_0 0xA0
+
+#define AST2500_MBOX_NUM_REGS 16
+#define AST2500_MBOX_DATA_0 0x00
+#define AST2500_MBOX_STATUS_0 0x40
+#define AST2500_MBOX_BMC_CTRL 0x48
+#define AST2500_MBOX_INTERRUPT_0 0x50
+
+struct aspeed_mbox_config {
+ u32 num_regs;
+ u32 data_offset;
+ u32 status_offset;
+ u32 bmc_control_offset;
+ u32 bmc_interrupt_offset;
+};
+
+static const struct aspeed_mbox_config ast2500_config = {
+ .num_regs = AST2500_MBOX_NUM_REGS,
+ .data_offset = AST2500_MBOX_DATA_0,
+ .status_offset = AST2500_MBOX_STATUS_0,
+ .bmc_control_offset = AST2500_MBOX_BMC_CTRL,
+ .bmc_interrupt_offset = AST2500_MBOX_INTERRUPT_0,
+};
+
+static const struct aspeed_mbox_config ast2600_config = {
+ .num_regs = AST2600_MBOX_NUM_REGS,
+ .data_offset = AST2600_MBOX_DATA_0,
+ .status_offset = AST2600_MBOX_STATUS_0,
+ .bmc_control_offset = AST2600_MBOX_BMC_CTRL,
+ .bmc_interrupt_offset = AST2600_MBOX_INTERRUPT_0,
+};
+
+struct aspeed_mbox {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+ struct clk *clk;
+ unsigned int base;
+ int irq;
+ wait_queue_head_t queue;
+ struct mutex mutex;
+ struct kfifo fifo;
+ spinlock_t lock;
+ struct aspeed_mbox_config configs;
+};
+
+static atomic_t aspeed_mbox_open_count = ATOMIC_INIT(0);
+
+static u8 aspeed_mbox_inb(struct aspeed_mbox *mbox, int reg)
+{
+ /*
+ * The mbox registers are only one byte wide but are addressed four
+ * bytes apart. The other three bytes are marked 'reserved'; they
+ * *should* read as zero, but let's not rely on it. We do rely on
+ * being able to read and write them without side effects.
+ */
+ unsigned int val = 0xff; /* If regmap throws an error return 0xff */
+ int rc = regmap_read(mbox->regmap, mbox->base + reg, &val);
+
+ if (rc)
+ dev_err(mbox->miscdev.parent, "regmap_read() failed with %d (reg: 0x%08x)\n",
+ rc, reg);
+
+ return val & 0xff;
+}
+
+static void aspeed_mbox_outb(struct aspeed_mbox *mbox, u8 data, int reg)
+{
+ int rc = regmap_write(mbox->regmap, mbox->base + reg, data);
+
+ if (rc)
+ dev_err(mbox->miscdev.parent, "regmap_write() failed with %d (data: %u reg: 0x%08x)\n",
+ rc, data, reg);
+}
+
+static struct aspeed_mbox *file_mbox(struct file *file)
+{
+ return container_of(file->private_data, struct aspeed_mbox, miscdev);
+}
+
+/* Save a byte to a FIFO and discard the oldest byte if FIFO is full */
+static void put_fifo_with_discard(struct aspeed_mbox *mbox, u8 val)
+{
+ if (!kfifo_initialized(&mbox->fifo))
+ return;
+ if (kfifo_is_full(&mbox->fifo))
+ kfifo_skip(&mbox->fifo);
+ kfifo_put(&mbox->fifo, val);
+}
+
+static int aspeed_mbox_open(struct inode *inode, struct file *file)
+{
+ struct aspeed_mbox *mbox = file_mbox(file);
+ int i;
+
+ if (atomic_inc_return(&aspeed_mbox_open_count) == 1) {
+ /*
+ * Reset the FIFO while opening to clear the old cached data
+ * and load the FIFO with latest mailbox register values.
+ */
+ spin_lock_irq(&mbox->lock);
+ kfifo_reset(&mbox->fifo);
+ for (i = 0; i < mbox->configs.num_regs; i++) {
+ put_fifo_with_discard(mbox,
+ aspeed_mbox_inb(mbox, mbox->configs.data_offset + (i * 4)));
+ }
+ spin_unlock_irq(&mbox->lock);
+
+ return 0;
+ }
+
+ atomic_dec(&aspeed_mbox_open_count);
+ return -EBUSY;
+}
+
+static ssize_t aspeed_mbox_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct aspeed_mbox *mbox = file_mbox(file);
+ unsigned int copied = 0;
+ char __user *p = buf;
+ ssize_t ret;
+ int i;
+
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+ if (count + *ppos > mbox->configs.num_regs)
+ return -EINVAL;
+
+ /* Only full-size reads go through the kfifo; partial reads access the registers directly. */
+ if (count != mbox->configs.num_regs) {
+ mutex_lock(&mbox->mutex);
+ for (i = *ppos; count > 0 && i < mbox->configs.num_regs; i++) {
+ u8 reg = aspeed_mbox_inb(mbox, mbox->configs.data_offset + (i * 4));
+
+ ret = __put_user(reg, p);
+ if (ret)
+ goto out_unlock;
+
+ p++;
+ count--;
+ }
+ mutex_unlock(&mbox->mutex);
+ return p - buf;
+ }
+
+ mutex_lock(&mbox->mutex);
+ if (kfifo_is_empty(&mbox->fifo)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = wait_event_interruptible(mbox->queue, !kfifo_is_empty(&mbox->fifo));
+ if (ret == -ERESTARTSYS) {
+ ret = -EINTR;
+ goto out_unlock;
+ }
+ }
+
+ /*
+ * Kfifo allows single reader to access the kfifo concurrently with
+ * single writer, which means that we only need to serialize against
+ * other callers of aspeed_mbox_read.
+ */
+ ret = kfifo_to_user(&mbox->fifo, buf, count, &copied);
+
+out_unlock:
+ mutex_unlock(&mbox->mutex);
+ return ret ? ret : copied;
+}
+
+static ssize_t aspeed_mbox_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct aspeed_mbox *mbox = file_mbox(file);
+ const char __user *p = buf;
+ ssize_t ret;
+ char c;
+ int i;
+
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+ if (count + *ppos > mbox->configs.num_regs)
+ return -EINVAL;
+
+ mutex_lock(&mbox->mutex);
+
+ for (i = *ppos; count > 0 && i < mbox->configs.num_regs; i++) {
+ ret = __get_user(c, p);
+ if (ret)
+ goto out_unlock;
+
+ aspeed_mbox_outb(mbox, c, mbox->configs.data_offset + (i * 4));
+ p++;
+ count--;
+ }
+
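+ /* Clear the write-one-to-clear status registers, then kick the transfer via the BMC control register */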
+ for (i = 0; i < mbox->configs.num_regs / 8; i++)
+ aspeed_mbox_outb(mbox, 0xff, mbox->configs.status_offset + (i * 4));
+
+ aspeed_mbox_outb(mbox,
+ ASPEED_MBOX_CTRL_RECV | ASPEED_MBOX_CTRL_MASK | ASPEED_MBOX_CTRL_SEND,
+ mbox->configs.bmc_control_offset);
+ ret = p - buf;
+
+out_unlock:
+ mutex_unlock(&mbox->mutex);
+ return ret;
+}
+
+static unsigned int aspeed_mbox_poll(struct file *file, poll_table *wait)
+{
+ struct aspeed_mbox *mbox = file_mbox(file);
+
+ poll_wait(file, &mbox->queue, wait);
+ return !kfifo_is_empty(&mbox->fifo) ? POLLIN : 0;
+}
+
+static int aspeed_mbox_release(struct inode *inode, struct file *file)
+{
+ atomic_dec(&aspeed_mbox_open_count);
+ return 0;
+}
+
+static long aspeed_mbox_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct aspeed_mbox *mbox = file_mbox(file);
+ struct aspeed_mbox_ioctl_data data;
+ long ret;
+
+ switch (cmd) {
+ case ASPEED_MBOX_SIZE:
+ data.data = mbox->configs.num_regs;
+ ret = copy_to_user((void __user *)param, &data, sizeof(data)) ? -EFAULT : 0;
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+ return ret;
+}
+
+static const struct file_operations aspeed_mbox_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_seek_end_llseek,
+ .read = aspeed_mbox_read,
+ .write = aspeed_mbox_write,
+ .open = aspeed_mbox_open,
+ .release = aspeed_mbox_release,
+ .poll = aspeed_mbox_poll,
+ .unlocked_ioctl = aspeed_mbox_ioctl,
+};
+
+static irqreturn_t aspeed_mbox_irq(int irq, void *arg)
+{
+ struct aspeed_mbox *mbox = arg;
+ int i;
+
+ dev_dbg(mbox->miscdev.parent, "BMC_CTRL11: 0x%02x\n",
+ aspeed_mbox_inb(mbox, mbox->configs.bmc_control_offset));
+ for (i = 0; i < mbox->configs.num_regs / 8; i++) {
+ dev_dbg(mbox->miscdev.parent, "STATUS: 0x%02x\n",
+ aspeed_mbox_inb(mbox, mbox->configs.status_offset + (i * 4)));
+ }
+ for (i = 0; i < mbox->configs.num_regs; i++) {
+ dev_dbg(mbox->miscdev.parent, "DATA_%d: 0x%02x\n", i,
+ aspeed_mbox_inb(mbox, mbox->configs.data_offset + (i * 4)));
+ }
+
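+ /* Latch the current data register contents into the FIFO for readers */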
+ spin_lock(&mbox->lock);
+ for (i = 0; i < mbox->configs.num_regs; i++) {
+ put_fifo_with_discard(mbox,
+ aspeed_mbox_inb(mbox, mbox->configs.data_offset + (i * 4)));
+ }
+ spin_unlock(&mbox->lock);
+
+ /* Clear interrupt status */
+ for (i = 0; i < mbox->configs.num_regs / 8; i++)
+ aspeed_mbox_outb(mbox, 0xff, mbox->configs.status_offset + (i * 4));
+
+ aspeed_mbox_outb(mbox, ASPEED_MBOX_CTRL_RECV, mbox->configs.bmc_control_offset);
+
+ wake_up(&mbox->queue);
+ return IRQ_HANDLED;
+}
+
+static int aspeed_mbox_config_irq(struct aspeed_mbox *mbox,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc;
+ int i;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ rc = devm_request_irq(dev, mbox->irq, aspeed_mbox_irq,
+ IRQF_SHARED, DEVICE_NAME, mbox);
+ if (rc < 0) {
+ dev_err(dev, "Unable to request IRQ %d\n", mbox->irq);
+ return rc;
+ }
+
+ /* Disable all register based interrupts. */
+ for (i = 0; i < mbox->configs.num_regs / 8; i++)
+ aspeed_mbox_outb(mbox, 0xff, mbox->configs.bmc_interrupt_offset + i * 4);
+
+ /* These registers are write one to clear. Clear them. */
+ for (i = 0; i < mbox->configs.num_regs / 8; i++)
+ aspeed_mbox_outb(mbox, 0xff, mbox->configs.status_offset + i * 4);
+
+ aspeed_mbox_outb(mbox, ASPEED_MBOX_CTRL_RECV, mbox->configs.bmc_control_offset);
+ return 0;
+}
+
+static const struct of_device_id aspeed_mbox_match[] = {
+ { .compatible = "aspeed,ast2400-mbox", .data = &ast2500_config },
+ { .compatible = "aspeed,ast2500-mbox", .data = &ast2500_config },
+ { .compatible = "aspeed,ast2600-mbox", .data = &ast2600_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, aspeed_mbox_match);
+
+static int aspeed_mbox_probe(struct platform_device *pdev)
+{
+ const struct aspeed_mbox_config *config;
+ const struct of_device_id *match;
+ struct aspeed_mbox *mbox;
+ struct device *dev;
+ int rc;
+
+ dev = &pdev->dev;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, mbox);
+
+ match = of_match_node(aspeed_mbox_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+
+ config = match->data;
+ memcpy(&mbox->configs, config, sizeof(mbox->configs));
+
+ rc = of_property_read_u32(dev->of_node, "reg", &mbox->base);
+ if (rc) {
+ dev_err(dev, "Couldn't read reg device-tree property\n");
+ return rc;
+ }
+
+ mbox->regmap = syscon_node_to_regmap(
+ pdev->dev.parent->of_node);
+ if (IS_ERR(mbox->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&mbox->lock);
+ mutex_init(&mbox->mutex);
+ init_waitqueue_head(&mbox->queue);
+
+ mbox->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mbox->clk))
+ return dev_err_probe(dev, PTR_ERR(mbox->clk),
+ "couldn't get clock\n");
+ rc = clk_prepare_enable(mbox->clk);
+ if (rc) {
+ dev_err(dev, "couldn't enable clock\n");
+ return rc;
+ }
+
+ /* Create FIFO data structure */
+ rc = kfifo_alloc(&mbox->fifo, mbox->configs.num_regs * sizeof(u32), GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ mbox->miscdev.minor = MISC_DYNAMIC_MINOR;
+ mbox->miscdev.name = DEVICE_NAME;
+ mbox->miscdev.fops = &aspeed_mbox_fops;
+ mbox->miscdev.parent = dev;
+ rc = misc_register(&mbox->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register device\n");
+ goto err;
+ }
+
+ rc = aspeed_mbox_config_irq(mbox, pdev);
+ if (rc) {
+ dev_err(dev, "Failed to configure IRQ\n");
+ misc_deregister(&mbox->miscdev);
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "LPC mbox registered, irq %d\n", mbox->irq);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(mbox->clk);
+
+ return rc;
+}
+
+static int aspeed_mbox_remove(struct platform_device *pdev)
+{
+ struct aspeed_mbox *mbox = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&mbox->miscdev);
+ clk_disable_unprepare(mbox->clk);
+ kfifo_free(&mbox->fifo);
+
+ return 0;
+}
+
+static struct platform_driver aspeed_mbox_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_mbox_match,
+ },
+ .probe = aspeed_mbox_probe,
+ .remove = aspeed_mbox_remove,
+};
+
+module_platform_driver(aspeed_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
+MODULE_DESCRIPTION("Aspeed mailbox device driver");
diff --git a/drivers/soc/aspeed/aspeed-lpc-sio.c b/drivers/soc/aspeed/aspeed-lpc-sio.c
new file mode 100644
index 000000000000..9bbaff83f968
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-lpc-sio.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012-2017 ASPEED Technology Inc.
+// Copyright (c) 2017-2020 Intel Corporation
+
+#include <linux/aspeed-lpc-sio.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+
+#define SOC_NAME "aspeed"
+#define DEVICE_NAME "lpc-sio"
+
+#define AST_LPC_SWCR0300 0x00
+#define LPC_PWRGD_STS BIT(30)
+#define LPC_PWRGD_RISING_EVT_STS BIT(29)
+#define LPC_PWRGD_FALLING_EVT_STS BIT(28)
+#define LPC_PWRBTN_STS BIT(27)
+#define LPC_PWRBTN_RISING_EVT_STS BIT(26)
+#define LPC_PWRBTN_FALLING_EVT_STS BIT(25)
+#define LPC_S5N_STS BIT(21)
+#define LPC_S5N_RISING_EVT_STS BIT(20)
+#define LPC_S5N_FALLING_EVT_STS BIT(19)
+#define LPC_S3N_STS BIT(18)
+#define LPC_S3N_RISING_EVT_STS BIT(17)
+#define LPC_S3N_FALLING_EVT_STS BIT(16)
+#define LPC_PWBTO_RAW_STS BIT(15)
+#define LPC_LAST_ONCTL_STS BIT(14)
+#define LPC_WAS_PFAIL_STS BIT(13)
+#define LPC_POWER_UP_FAIL_STS BIT(12) /* Crowbar */
+#define LPC_PWRBTN_OVERRIDE_STS BIT(11)
+#define LPC_BMC_TRIG_WAKEUP_EVT_STS BIT(8)
+
+#define AST_LPC_SWCR0704 0x04
+#define LPC_BMC_TRIG_WAKEUP_EVT_EN BIT(8)
+
+#define AST_LPC_SWCR0B08 0x08
+#define LPC_PWREQ_OUTPUT_LEVEL BIT(25)
+#define LPC_PWBTO_OUTPUT_LEVEL BIT(24)
+#define LPC_ONCTL_STS BIT(15)
+#define LPC_ONCTL_GPIO_LEVEL BIT(14)
+#define LPC_ONCTL_EN_GPIO_OUTPUT BIT(13)
+#define LPC_ONCTL_EN_GPIO_MODE BIT(12)
+#define LPC_BMC_TRIG_WAKEUP_EVT BIT(6)
+#define LPC_BMC_TRIG_SMI_EVT_EN BIT(0)
+
+#define AST_LPC_SWCR0F0C 0x0C
+#define AST_LPC_SWCR1310 0x10
+#define AST_LPC_SWCR1714 0x14
+#define AST_LPC_SWCR1B18 0x18
+#define AST_LPC_SWCR1F1C 0x1C
+#define AST_LPC_ACPIE3E0 0x20
+#define AST_LPC_ACPIC1C0 0x24
+
+#define AST_LPC_ACPIB3B0 0x28
+#define LPC_BMC_TRIG_SCI_EVT_STS BIT(8)
+
+#define AST_LPC_ACPIB7B4 0x2C
+#define LPC_BMC_TRIG_SCI_EVT_EN BIT(8)
+
+struct aspeed_lpc_sio {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct semaphore lock;
+ unsigned int reg_base;
+};
+
+static struct aspeed_lpc_sio *file_aspeed_lpc_sio(struct file *file)
+{
+ return container_of(file->private_data, struct aspeed_lpc_sio,
+ miscdev);
+}
+
+static int aspeed_lpc_sio_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+#define LPC_SLP3N5N_EVENT_STATUS (\
+ LPC_S5N_RISING_EVT_STS | \
+ LPC_S5N_FALLING_EVT_STS | \
+ LPC_S3N_RISING_EVT_STS | \
+ LPC_S3N_FALLING_EVT_STS)
+
+/*
+ * SLPS3n  SLPS5n  State
+ * ----------------------
+ *   1       1     S12
+ *   0       1     S3I
+ *   x       0     S45
+ */
+
+static void sio_get_acpi_state(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg, val;
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0300;
+ regmap_read(lpc_sio->regmap, reg, &val);
+
+ /* update the ACPI state event status */
+ if (sio_data->param != 0) {
+ if (val & LPC_SLP3N5N_EVENT_STATUS) {
+ sio_data->param = 1;
+ regmap_write(lpc_sio->regmap, reg,
+ LPC_SLP3N5N_EVENT_STATUS);
+ } else {
+ sio_data->param = 0;
+ }
+ }
+
+ if ((val & LPC_S3N_STS) && (val & LPC_S5N_STS))
+ sio_data->data = ACPI_STATE_S12;
+ else if ((val & LPC_S3N_STS) == 0 && (val & LPC_S5N_STS))
+ sio_data->data = ACPI_STATE_S3I;
+ else
+ sio_data->data = ACPI_STATE_S45;
+}
+
+#define LPC_PWRGD_EVENT_STATUS ( \
+ LPC_PWRGD_RISING_EVT_STS | \
+ LPC_PWRGD_FALLING_EVT_STS)
+
+static void sio_get_pwrgd_status(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg, val;
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0300;
+ regmap_read(lpc_sio->regmap, reg, &val);
+
+ /* update the PWRGD event status */
+ if (sio_data->param != 0) {
+ if (val & LPC_PWRGD_EVENT_STATUS) {
+ sio_data->param = 1;
+ regmap_write(lpc_sio->regmap, reg,
+ LPC_PWRGD_EVENT_STATUS);
+ } else {
+ sio_data->param = 0;
+ }
+ }
+
+ sio_data->data = (val & LPC_PWRGD_STS) != 0 ? 1 : 0;
+}
+
+static void sio_get_onctl_status(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg, val;
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0B08;
+ regmap_read(lpc_sio->regmap, reg, &val);
+
+ sio_data->data = (val & LPC_ONCTL_STS) != 0 ? 1 : 0;
+}
+
+static void sio_set_onctl_gpio(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg, val;
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0B08;
+ regmap_read(lpc_sio->regmap, reg, &val);
+
+ /* Enable ONCTL GPIO mode */
+ if (sio_data->param != 0) {
+ val |= LPC_ONCTL_EN_GPIO_MODE;
+ val |= LPC_ONCTL_EN_GPIO_OUTPUT;
+
+ if (sio_data->data != 0)
+ val |= LPC_ONCTL_GPIO_LEVEL;
+ else
+ val &= ~LPC_ONCTL_GPIO_LEVEL;
+
+ regmap_write(lpc_sio->regmap, reg, val);
+ } else {
+ val &= ~LPC_ONCTL_EN_GPIO_MODE;
+ regmap_write(lpc_sio->regmap, reg, val);
+ }
+}
+
+static void sio_get_pwrbtn_override(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg, val;
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0300;
+ regmap_read(lpc_sio->regmap, reg, &val);
+
+ /* clear the PWRBTN OVERRIDE status */
+ if (sio_data->param != 0 && val & LPC_PWRBTN_OVERRIDE_STS)
+ regmap_write(lpc_sio->regmap, reg, LPC_PWRBTN_OVERRIDE_STS);
+
+ sio_data->data = (val & LPC_PWRBTN_OVERRIDE_STS) != 0 ? 1 : 0;
+}
+
+static void sio_get_pfail_status(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg, val;
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0300;
+ regmap_read(lpc_sio->regmap, reg, &val);
+
+ /*
+ * [ASPEED]: SWCR_03_00[13] (Was_pfail, default 1) identifies whether the
+ * current boot follows an AC loss (rather than a DC loss), provided
+ * firmware clears this bit after every successful boot.
+ */
+ if (val & LPC_WAS_PFAIL_STS) {
+ regmap_write(lpc_sio->regmap, reg, 0); /* W0C */
+ sio_data->data = 1;
+ } else {
+ sio_data->data = 0;
+ }
+}
+
+static void sio_set_bmc_sci_event(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg;
+
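+ /* param != 0: arm and trigger the SCI wakeup event; param == 0: clear the pending event status */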
+ if (sio_data->param) {
+ reg = lpc_sio->reg_base + AST_LPC_ACPIB7B4;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_SCI_EVT_EN,
+ LPC_BMC_TRIG_SCI_EVT_EN);
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0704;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_WAKEUP_EVT_EN,
+ LPC_BMC_TRIG_WAKEUP_EVT_EN);
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0B08;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_WAKEUP_EVT,
+ LPC_BMC_TRIG_WAKEUP_EVT);
+ } else {
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0300;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_WAKEUP_EVT_STS,
+ LPC_BMC_TRIG_WAKEUP_EVT_STS);
+
+ reg = lpc_sio->reg_base + AST_LPC_ACPIB3B0;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_SCI_EVT_STS,
+ LPC_BMC_TRIG_SCI_EVT_STS);
+ }
+
+ sio_data->data = sio_data->param;
+}
+
+static void sio_set_bmc_smi_event(struct aspeed_lpc_sio *lpc_sio,
+ struct sio_ioctl_data *sio_data)
+{
+ u32 reg;
+
+ if (sio_data->param) {
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0704;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_WAKEUP_EVT_EN,
+ LPC_BMC_TRIG_WAKEUP_EVT_EN);
+
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0B08;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_SMI_EVT_EN,
+ LPC_BMC_TRIG_SMI_EVT_EN);
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_WAKEUP_EVT,
+ LPC_BMC_TRIG_WAKEUP_EVT);
+ } else {
+ reg = lpc_sio->reg_base + AST_LPC_SWCR0300;
+ regmap_write_bits(lpc_sio->regmap, reg,
+ LPC_BMC_TRIG_WAKEUP_EVT_STS,
+ LPC_BMC_TRIG_WAKEUP_EVT_STS);
+ }
+
+ sio_data->data = sio_data->param;
+}
+
+typedef void (*sio_cmd_fn) (struct aspeed_lpc_sio *sio_dev,
+ struct sio_ioctl_data *sio_data);
+
+static sio_cmd_fn sio_cmd_handle[SIO_MAX_CMD] = {
+ [SIO_GET_ACPI_STATE] = sio_get_acpi_state,
+ [SIO_GET_PWRGD_STATUS] = sio_get_pwrgd_status,
+ [SIO_GET_ONCTL_STATUS] = sio_get_onctl_status,
+ [SIO_SET_ONCTL_GPIO] = sio_set_onctl_gpio,
+ [SIO_GET_PWRBTN_OVERRIDE] = sio_get_pwrbtn_override,
+ [SIO_GET_PFAIL_STATUS] = sio_get_pfail_status,
+ [SIO_SET_BMC_SCI_EVENT] = sio_set_bmc_sci_event,
+ [SIO_SET_BMC_SMI_EVENT] = sio_set_bmc_smi_event,
+};
+
+static long aspeed_lpc_sio_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct aspeed_lpc_sio *lpc_sio = file_aspeed_lpc_sio(file);
+ struct sio_ioctl_data sio_data;
+ sio_cmd_fn cmd_fn;
+ long ret;
+
+ if (copy_from_user(&sio_data, (void __user *)param, sizeof(sio_data)))
+ return -EFAULT;
+
+ if (cmd != SIO_IOC_COMMAND || sio_data.sio_cmd >= SIO_MAX_CMD)
+ return -EINVAL;
+
+ cmd_fn = sio_cmd_handle[sio_data.sio_cmd];
+ if (!cmd_fn)
+ return -EINVAL;
+
+ if (down_interruptible(&lpc_sio->lock) != 0)
+ return -ERESTARTSYS;
+
+ cmd_fn(lpc_sio, &sio_data);
+ ret = copy_to_user((void __user *)param, &sio_data, sizeof(sio_data)) ? -EFAULT : 0;
+
+ up(&lpc_sio->lock);
+
+ return ret;
+}
+
+static const struct file_operations aspeed_lpc_sio_fops = {
+ .owner = THIS_MODULE,
+ .open = aspeed_lpc_sio_open,
+ .unlocked_ioctl = aspeed_lpc_sio_ioctl,
+};
+
+static int aspeed_lpc_sio_probe(struct platform_device *pdev)
+{
+ struct aspeed_lpc_sio *lpc_sio;
+ struct device *dev;
+ u32 val;
+ int ret;
+
+ dev = &pdev->dev;
+
+ lpc_sio = devm_kzalloc(dev, sizeof(*lpc_sio), GFP_KERNEL);
+ if (!lpc_sio)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, lpc_sio);
+
+ ret = of_property_read_u32(dev->of_node, "reg", &lpc_sio->reg_base);
+ if (ret) {
+ dev_err(dev, "Couldn't read reg device-tree property\n");
+ return ret;
+ }
+
+ lpc_sio->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(lpc_sio->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ /*
+ * We check that the regmap works on this very first access,
+ * but as this is an MMIO-backed regmap, subsequent regmap
+ * access is not going to fail and we skip error checks from
+ * this point.
+ */
+ ret = regmap_read(lpc_sio->regmap, AST_LPC_SWCR0300, &val);
+ if (ret) {
+ dev_err(dev, "failed to read regmap\n");
+ return ret;
+ }
+
+ sema_init(&lpc_sio->lock, 1);
+
+ lpc_sio->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpc_sio->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_sio->clk),
+ "couldn't get clock\n");
+ ret = clk_prepare_enable(lpc_sio->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clock\n");
+ return ret;
+ }
+
+ lpc_sio->miscdev.minor = MISC_DYNAMIC_MINOR;
+ lpc_sio->miscdev.name = DEVICE_NAME;
+ lpc_sio->miscdev.fops = &aspeed_lpc_sio_fops;
+ lpc_sio->miscdev.parent = dev;
+ ret = misc_register(&lpc_sio->miscdev);
+ if (ret) {
+ dev_err(dev, "Unable to register device\n");
+ goto err;
+ }
+
+ dev_info(dev, "Loaded at %pap (0x%08x)\n", &lpc_sio->regmap,
+ lpc_sio->reg_base);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(lpc_sio->clk);
+
+ return ret;
+}
+
+static int aspeed_lpc_sio_remove(struct platform_device *pdev)
+{
+ struct aspeed_lpc_sio *lpc_sio = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&lpc_sio->miscdev);
+ clk_disable_unprepare(lpc_sio->clk);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_lpc_sio_match[] = {
+ { .compatible = "aspeed,ast2500-lpc-sio" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, aspeed_lpc_sio_match);
+
+static struct platform_driver aspeed_lpc_sio_driver = {
+ .driver = {
+ .name = SOC_NAME "-" DEVICE_NAME,
+ .of_match_table = of_match_ptr(aspeed_lpc_sio_match),
+ },
+ .probe = aspeed_lpc_sio_probe,
+ .remove = aspeed_lpc_sio_remove,
+};
+module_platform_driver(aspeed_lpc_sio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_AUTHOR("Yong Li <yong.blli@linux.intel.com>");
+MODULE_DESCRIPTION("ASPEED AST LPC SIO device driver");
diff --git a/drivers/soc/aspeed/aspeed-mctp.c b/drivers/soc/aspeed/aspeed-mctp.c
new file mode 100644
index 000000000000..5073f0e14dc3
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-mctp.c
@@ -0,0 +1,1837 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020, Intel Corporation.
+
+#include <linux/aspeed-mctp.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/ptr_ring.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <uapi/linux/aspeed-mctp.h>
+
+/* AST2600 MCTP Controller registers */
+#define ASPEED_MCTP_CTRL 0x000
+#define TX_CMD_TRIGGER BIT(0)
+#define RX_CMD_READY BIT(4)
+#define MATCHING_EID BIT(9)
+
+#define ASPEED_MCTP_TX_CMD 0x004
+#define ASPEED_MCTP_RX_CMD 0x008
+
+#define ASPEED_MCTP_INT_STS 0x00c
+#define ASPEED_MCTP_INT_EN 0x010
+#define TX_CMD_SENT_INT BIT(0)
+#define TX_CMD_LAST_INT BIT(1)
+#define TX_CMD_WRONG_INT BIT(2)
+#define RX_CMD_RECEIVE_INT BIT(8)
+#define RX_CMD_NO_MORE_INT BIT(9)
+
+#define ASPEED_MCTP_EID 0x014
+#define ASPEED_MCTP_OBFF_CTRL 0x018
+
+#define ASPEED_MCTP_ENGINE_CTRL 0x01c
+#define TX_MAX_PAYLOAD_SIZE_SHIFT 0
+#define TX_MAX_PAYLOAD_SIZE_MASK GENMASK(1, TX_MAX_PAYLOAD_SIZE_SHIFT)
+#define TX_MAX_PAYLOAD_SIZE(x) \
+ (((x) << TX_MAX_PAYLOAD_SIZE_SHIFT) & TX_MAX_PAYLOAD_SIZE_MASK)
+#define RX_MAX_PAYLOAD_SIZE_SHIFT 4
+#define RX_MAX_PAYLOAD_SIZE_MASK GENMASK(5, RX_MAX_PAYLOAD_SIZE_SHIFT)
+#define RX_MAX_PAYLOAD_SIZE(x) \
+ (((x) << RX_MAX_PAYLOAD_SIZE_SHIFT) & RX_MAX_PAYLOAD_SIZE_MASK)
+#define FIFO_LAYOUT_SHIFT 8
+#define FIFO_LAYOUT_MASK GENMASK(9, FIFO_LAYOUT_SHIFT)
+#define FIFO_LAYOUT(x) \
+ (((x) << FIFO_LAYOUT_SHIFT) & FIFO_LAYOUT_MASK)
+
+#define ASPEED_MCTP_RX_BUF_ADDR 0x020
+#define ASPEED_MCTP_RX_BUF_SIZE 0x024
+#define ASPEED_MCTP_RX_BUF_RD_PTR 0x028
+#define UPDATE_RX_RD_PTR BIT(31)
+#define RX_BUF_RD_PTR_MASK GENMASK(11, 0)
+#define ASPEED_MCTP_RX_BUF_WR_PTR 0x02c
+#define RX_BUF_WR_PTR_MASK GENMASK(11, 0)
+
+#define ASPEED_MCTP_TX_BUF_ADDR 0x030
+#define ASPEED_MCTP_TX_BUF_SIZE 0x034
+#define ASPEED_MCTP_TX_BUF_RD_PTR 0x038
+#define UPDATE_TX_RD_PTR BIT(31)
+#define TX_BUF_RD_PTR_MASK GENMASK(11, 0)
+#define ASPEED_MCTP_TX_BUF_WR_PTR 0x03c
+#define TX_BUF_WR_PTR_MASK GENMASK(11, 0)
+
+#define ADDR_LEN (BIT(26) - 1)
+#define DATA_ADDR(x) (((x) >> 4) & ADDR_LEN)
+
+/* TX command */
+#define TX_LAST_CMD BIT(31)
+#define TX_DATA_ADDR_SHIFT 4
+#define TX_DATA_ADDR_MASK GENMASK(30, TX_DATA_ADDR_SHIFT)
+#define TX_DATA_ADDR(x) \
+ ((DATA_ADDR(x) << TX_DATA_ADDR_SHIFT) & TX_DATA_ADDR_MASK)
+#define TX_RESERVED_1_MASK GENMASK(1, 0) /* must be 1 */
+#define TX_RESERVED_1 1
+#define TX_STOP_AFTER_CMD BIT(16)
+#define TX_INTERRUPT_AFTER_CMD BIT(15)
+#define TX_PACKET_SIZE_SHIFT 2
+#define TX_PACKET_SIZE_MASK GENMASK(12, TX_PACKET_SIZE_SHIFT)
+#define TX_PACKET_SIZE(x) \
+ (((x) << TX_PACKET_SIZE_SHIFT) & TX_PACKET_SIZE_MASK)
+#define TX_RESERVED_0_MASK GENMASK(1, 0) /* MBZ */
+#define TX_RESERVED_0 0
+
+/* RX command */
+#define RX_INTERRUPT_AFTER_CMD BIT(2)
+#define RX_DATA_ADDR_SHIFT 4
+#define RX_DATA_ADDR_MASK GENMASK(30, RX_DATA_ADDR_SHIFT)
+#define RX_DATA_ADDR(x) \
+ ((DATA_ADDR(x) << RX_DATA_ADDR_SHIFT) & RX_DATA_ADDR_MASK)
+
+/* HW buffer sizes */
+#define TX_PACKET_COUNT 48
+#define RX_PACKET_COUNT 96
+#define TX_MAX_PACKET_COUNT (TX_BUF_RD_PTR_MASK + 1)
+#define RX_MAX_PACKET_COUNT (RX_BUF_RD_PTR_MASK + 1)
+
+#define TX_CMD_BUF_SIZE \
+ PAGE_ALIGN(TX_PACKET_COUNT * sizeof(struct aspeed_mctp_tx_cmd))
+#define TX_DATA_BUF_SIZE \
+ PAGE_ALIGN(TX_PACKET_COUNT * sizeof(struct mctp_pcie_packet_data))
+#define RX_CMD_BUF_SIZE PAGE_ALIGN(RX_PACKET_COUNT * sizeof(u32))
+#define RX_DATA_BUF_SIZE \
+ PAGE_ALIGN(RX_PACKET_COUNT * sizeof(struct mctp_pcie_packet_data))
+
+/* Per client packet cache sizes */
+#define RX_RING_COUNT 64
+#define TX_RING_COUNT 64
+
+/* PCIe Host Controller registers */
+#define ASPEED_PCIE_MISC_STS_1 0x0c4
+
+/* PCI address definitions */
+#define PCI_DEV_NUM_MASK GENMASK(4, 0)
+#define PCI_BUS_NUM_SHIFT 5
+#define PCI_BUS_NUM_MASK GENMASK(12, PCI_BUS_NUM_SHIFT)
+#define GET_PCI_DEV_NUM(x) ((x) & PCI_DEV_NUM_MASK)
+#define GET_PCI_BUS_NUM(x) (((x) & PCI_BUS_NUM_MASK) >> PCI_BUS_NUM_SHIFT)
+
+/* MCTP header definitions */
+#define MCTP_HDR_SRC_EID_OFFSET 14
+#define MCTP_HDR_TAG_OFFSET 15
+#define MCTP_HDR_SOM BIT(7)
+#define MCTP_HDR_EOM BIT(6)
+#define MCTP_HDR_SOM_EOM (MCTP_HDR_SOM | MCTP_HDR_EOM)
+#define MCTP_HDR_TYPE_OFFSET 16
+#define MCTP_HDR_TYPE_CONTROL 0
+#define MCTP_HDR_TYPE_VDM_PCI 0x7e
+#define MCTP_HDR_TYPE_SPDM 0x5
+#define MCTP_HDR_TYPE_BASE_LAST MCTP_HDR_TYPE_SPDM
+#define MCTP_HDR_VENDOR_OFFSET 17
+#define MCTP_HDR_VDM_TYPE_OFFSET 19
+
+/* FIXME: ast2600 supports variable max transmission unit */
+#define ASPEED_MCTP_MTU 64
+
+struct aspeed_mctp_tx_cmd {
+ u32 tx_lo;
+ u32 tx_hi;
+};
+
+struct mctp_buffer {
+ void *vaddr;
+ dma_addr_t dma_handle;
+};
+
+struct mctp_channel {
+ struct mctp_buffer data;
+ struct mctp_buffer cmd;
+ struct tasklet_struct tasklet;
+ u32 buffer_count;
+ u32 rd_ptr;
+ u32 wr_ptr;
+ bool stopped;
+};
+
+struct aspeed_mctp {
+ struct device *dev;
+ struct regmap *map;
+ struct reset_control *reset;
+ struct mctp_channel tx;
+ struct mctp_channel rx;
+ struct list_head clients;
+ struct mctp_client *default_client;
+ struct list_head mctp_type_handlers;
+ /*
+ * clients_lock protects list of clients, list of type handlers
+ * and default client
+ */
+ spinlock_t clients_lock;
+ struct list_head endpoints;
+ size_t endpoints_count;
+ /*
+ * endpoints_lock protects list of endpoints
+ */
+ struct mutex endpoints_lock;
+ struct {
+ struct regmap *map;
+ struct delayed_work rst_dwork;
+ bool need_uevent;
+ u16 bdf;
+ } pcie;
+ struct {
+ bool enable;
+ bool warmup;
+ int packet_counter;
+ } rx_runaway_wa;
+ u8 eid;
+ struct platform_device *peci_mctp;
+};
+
+struct mctp_client {
+ struct kref ref;
+ struct aspeed_mctp *priv;
+ struct ptr_ring tx_queue;
+ struct ptr_ring rx_queue;
+ struct list_head link;
+ wait_queue_head_t wait_queue;
+};
+
+struct mctp_type_handler {
+ u8 mctp_type;
+ u16 pci_vendor_id;
+ u16 vdm_type;
+ u16 vdm_mask;
+ struct mctp_client *client;
+ struct list_head link;
+};
+
+union aspeed_mctp_eid_data_info {
+ struct aspeed_mctp_eid_info eid_info;
+ struct aspeed_mctp_eid_ext_info eid_ext_info;
+};
+
+enum mctp_address_type {
+ ASPEED_MCTP_GENERIC_ADDR_FORMAT = 0,
+ ASPEED_MCTP_EXTENDED_ADDR_FORMAT = 1
+};
+
+struct aspeed_mctp_endpoint {
+ union aspeed_mctp_eid_data_info data;
+ struct list_head link;
+};
+
+static struct kmem_cache *packet_cache;
+
+void *aspeed_mctp_packet_alloc(gfp_t flags)
+{
+ return kmem_cache_alloc(packet_cache, flags);
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_packet_alloc);
+
+void aspeed_mctp_packet_free(void *packet)
+{
+ kmem_cache_free(packet_cache, packet);
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_packet_free);
+
+/*
+ * The HW produces and expects the VDM header in little endian and the payload
+ * in network order. To allow userspace to use network order for the whole
+ * packet, the PCIe VDM header needs to be byte-swapped.
+ */
+static void aspeed_mctp_swap_pcie_vdm_hdr(struct mctp_pcie_packet_data *data)
+{
+ int i;
+
+ for (i = 0; i < PCIE_VDM_HDR_SIZE_DW; i++)
+ data->hdr[i] = swab32(data->hdr[i]);
+}
+
+static void aspeed_mctp_rx_trigger(struct mctp_channel *rx)
+{
+ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx);
+
+ /*
+ * Even though rx_buf_addr doesn't change, the HW doesn't trigger RX
+ * unless we rewrite it here. We also clear the RX_CMD_READY bit
+ * first; otherwise, in rare cases, the trigger is not registered by
+ * the HW and it ends up stuck (not reacting to wr_ptr writes).
+ * Also note that we write 0 as wr_ptr - writing any other value makes
+ * the HW behave in a bizarre way that's hard to explain...
+ */
+ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY, 0);
+ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_ADDR, rx->cmd.dma_handle);
+ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_WR_PTR, 0);
+
+ /* After re-enabling RX we need to restart WA logic */
+ if (priv->rx_runaway_wa.enable) {
+ priv->rx_runaway_wa.warmup = true;
+ priv->rx_runaway_wa.packet_counter = 0;
+ priv->rx.buffer_count = RX_PACKET_COUNT;
+ }
+
+ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY,
+ RX_CMD_READY);
+}
+
+static void aspeed_mctp_tx_trigger(struct mctp_channel *tx, bool notify)
+{
+ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx);
+
+ if (notify) {
+ struct aspeed_mctp_tx_cmd *last_cmd;
+
+ last_cmd = (struct aspeed_mctp_tx_cmd *)tx->cmd.vaddr +
+ (tx->wr_ptr + TX_PACKET_COUNT - 1) % TX_PACKET_COUNT;
+ last_cmd->tx_lo |= TX_INTERRUPT_AFTER_CMD;
+ }
+
+ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, tx->wr_ptr);
+ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER,
+ TX_CMD_TRIGGER);
+}
+
+static void aspeed_mctp_emit_tx_cmd(struct mctp_channel *tx,
+ struct mctp_pcie_packet *packet)
+{
+ struct aspeed_mctp_tx_cmd *tx_cmd =
+ (struct aspeed_mctp_tx_cmd *)tx->cmd.vaddr + tx->wr_ptr;
+ u32 packet_sz_dw = packet->size / sizeof(u32) -
+ sizeof(packet->data.hdr) / sizeof(u32);
+ u32 offset = tx->wr_ptr * sizeof(packet->data);
+
+ aspeed_mctp_swap_pcie_vdm_hdr(&packet->data);
+
+ memcpy((u8 *)tx->data.vaddr + offset, &packet->data,
+ sizeof(packet->data));
+
+ tx_cmd->tx_lo = TX_PACKET_SIZE(packet_sz_dw);
+ tx_cmd->tx_hi = TX_RESERVED_1;
+ tx_cmd->tx_hi |= TX_DATA_ADDR(tx->data.dma_handle + offset);
+
+ tx->wr_ptr = (tx->wr_ptr + 1) % TX_PACKET_COUNT;
+}
+
+static struct mctp_client *aspeed_mctp_client_alloc(struct aspeed_mctp *priv)
+{
+ struct mctp_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ goto out;
+
+ kref_init(&client->ref);
+ client->priv = priv;
+ ptr_ring_init(&client->tx_queue, TX_RING_COUNT, GFP_KERNEL);
+ ptr_ring_init(&client->rx_queue, RX_RING_COUNT, GFP_ATOMIC);
+
+out:
+ return client;
+}
+
+static void aspeed_mctp_client_free(struct kref *ref)
+{
+ struct mctp_client *client = container_of(ref, typeof(*client), ref);
+
+ ptr_ring_cleanup(&client->rx_queue, &aspeed_mctp_packet_free);
+ ptr_ring_cleanup(&client->tx_queue, &aspeed_mctp_packet_free);
+
+ kfree(client);
+}
+
+static void aspeed_mctp_client_get(struct mctp_client *client)
+{
+ lockdep_assert_held(&client->priv->clients_lock);
+
+ kref_get(&client->ref);
+}
+
+static void aspeed_mctp_client_put(struct mctp_client *client)
+{
+ kref_put(&client->ref, &aspeed_mctp_client_free);
+}
+
+static struct mctp_client *
+aspeed_mctp_find_handler(struct aspeed_mctp *priv,
+ struct mctp_pcie_packet *packet)
+{
+ struct mctp_type_handler *handler;
+ u8 *hdr = (u8 *)packet->data.hdr;
+ struct mctp_client *client = NULL;
+ u8 mctp_type, som_eom;
+ u16 vendor = 0;
+ u16 vdm_type = 0;
+
+ lockdep_assert_held(&priv->clients_lock);
+
+ /*
+ * Middle and EOM fragments cannot be matched to MCTP type.
+ * For consistency do not match type for any fragmented messages.
+ */
+ som_eom = hdr[MCTP_HDR_TAG_OFFSET] & MCTP_HDR_SOM_EOM;
+ if (som_eom != MCTP_HDR_SOM_EOM)
+ return NULL;
+
+ mctp_type = hdr[MCTP_HDR_TYPE_OFFSET];
+ if (mctp_type == MCTP_HDR_TYPE_VDM_PCI) {
+ vendor = *((u16 *)&hdr[MCTP_HDR_VENDOR_OFFSET]);
+ vdm_type = *((u16 *)&hdr[MCTP_HDR_VDM_TYPE_OFFSET]);
+ }
+
+ list_for_each_entry(handler, &priv->mctp_type_handlers, link) {
+ if (handler->mctp_type == mctp_type &&
+ handler->pci_vendor_id == vendor &&
+ handler->vdm_type == (vdm_type & handler->vdm_mask)) {
+ dev_dbg(priv->dev, "Found client for type %x vdm %x\n",
+ mctp_type, handler->vdm_type);
+ client = handler->client;
+ break;
+ }
+ }
+ return client;
+}
+
+static void aspeed_mctp_dispatch_packet(struct aspeed_mctp *priv,
+ struct mctp_pcie_packet *packet)
+{
+ struct mctp_client *client;
+ int ret;
+
+ spin_lock(&priv->clients_lock);
+
+ client = aspeed_mctp_find_handler(priv, packet);
+
+ if (!client)
+ client = priv->default_client;
+
+ if (client)
+ aspeed_mctp_client_get(client);
+
+ spin_unlock(&priv->clients_lock);
+
+ if (client) {
+ ret = ptr_ring_produce(&client->rx_queue, packet);
+ if (ret) {
+ /*
+ * This can happen if the client process does not
+ * consume packets fast enough
+ */
+ dev_dbg(priv->dev, "Failed to store packet in client RX queue\n");
+ aspeed_mctp_packet_free(packet);
+ } else {
+ wake_up_all(&client->wait_queue);
+ }
+ aspeed_mctp_client_put(client);
+ } else {
+ dev_dbg(priv->dev, "Failed to dispatch RX packet\n");
+ aspeed_mctp_packet_free(packet);
+ }
+}
+
+static void aspeed_mctp_tx_tasklet(unsigned long data)
+{
+ struct mctp_channel *tx = (struct mctp_channel *)data;
+ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx);
+ struct mctp_client *client;
+ bool trigger = false;
+ bool full = false;
+ u32 rd_ptr;
+
+ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, UPDATE_TX_RD_PTR);
+ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, &rd_ptr);
+ rd_ptr &= TX_BUF_RD_PTR_MASK;
+
+ spin_lock(&priv->clients_lock);
+
+ list_for_each_entry(client, &priv->clients, link) {
+ while (!(full = (tx->wr_ptr + 1) % TX_PACKET_COUNT == rd_ptr)) {
+ struct mctp_pcie_packet *packet;
+
+ packet = ptr_ring_consume(&client->tx_queue);
+ if (!packet)
+ break;
+
+ aspeed_mctp_emit_tx_cmd(tx, packet);
+ aspeed_mctp_packet_free(packet);
+ trigger = true;
+ }
+ }
+
+ spin_unlock(&priv->clients_lock);
+
+ if (trigger)
+ aspeed_mctp_tx_trigger(tx, full);
+}
+
+static void aspeed_mctp_rx_tasklet(unsigned long data)
+{
+ struct mctp_channel *rx = (struct mctp_channel *)data;
+ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx);
+ struct mctp_pcie_packet *rx_packet;
+ struct mctp_pcie_packet_data *rx_buf;
+ u32 hw_read_ptr;
+ u32 *hdr;
+
+ /* Trigger HW read pointer update, must be done before RX loop */
+ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_RD_PTR, UPDATE_RX_RD_PTR);
+
+ /*
+ * XXX: Using the rd_ptr obtained from HW is unreliable, so we need to
+ * maintain the buffer state on our own by peeking into the buffer
+ * and checking where the packet was written.
+ */
+ rx_buf = (struct mctp_pcie_packet_data *)rx->data.vaddr;
+ hdr = (u32 *)&rx_buf[rx->wr_ptr];
+
+ if (priv->rx_runaway_wa.warmup && !*hdr) {
+ u32 tmp_wr_ptr = rx->wr_ptr;
+
+ /*
+ * HACK: Right after start, the RX hardware can put a received
+ * packet at an unexpected offset - to locate it, the driver
+ * has to scan all RX data buffers.
+ */
+ do {
+ tmp_wr_ptr = (tmp_wr_ptr + 1) % RX_PACKET_COUNT;
+
+ hdr = (u32 *)&rx_buf[tmp_wr_ptr];
+ } while (!*hdr && tmp_wr_ptr != rx->wr_ptr);
+
+ if (tmp_wr_ptr != rx->wr_ptr) {
+ dev_dbg(priv->dev, "Runaway RX packet found %d -> %d\n",
+ rx->wr_ptr, tmp_wr_ptr);
+ rx->wr_ptr = tmp_wr_ptr;
+ }
+
+ /*
+ * Once we have received RX_PACKET_COUNT packets, the hardware is
+ * guaranteed to use only (RX_PACKET_COUNT - 4) buffers, so we
+ * decrease the buffer count by 4 and turn off scanning of the RX
+ * buffers. RX buffer scanning must be re-enabled every time the RX
+ * hardware is started.
+ * This is just a performance optimization - we could keep scanning
+ * the RX buffers forever, but under heavy traffic it is fairly
+ * common for rx_tasklet to run while the RX buffer ring is empty.
+ */
+ if (priv->rx_runaway_wa.packet_counter > RX_PACKET_COUNT) {
+ priv->rx_runaway_wa.warmup = false;
+ rx->buffer_count = RX_PACKET_COUNT - 4;
+ }
+ }
+
+ while (*hdr != 0) {
+ rx_packet = aspeed_mctp_packet_alloc(GFP_ATOMIC);
+ if (rx_packet) {
+ memcpy(&rx_packet->data, hdr, sizeof(rx_packet->data));
+
+ aspeed_mctp_swap_pcie_vdm_hdr(&rx_packet->data);
+
+ aspeed_mctp_dispatch_packet(priv, rx_packet);
+ } else {
+ dev_dbg(priv->dev, "Failed to allocate RX packet\n");
+ }
+
+ *hdr = 0;
+ rx->wr_ptr = (rx->wr_ptr + 1) % rx->buffer_count;
+ hdr = (u32 *)&rx_buf[rx->wr_ptr];
+
+ priv->rx_runaway_wa.packet_counter++;
+ }
+
+ /*
+ * Update the HW write pointer; this can be done only after the
+ * driver consumes packets from the RX ring.
+ */
+ regmap_read(priv->map, ASPEED_MCTP_RX_BUF_RD_PTR, &hw_read_ptr);
+ hw_read_ptr &= RX_BUF_RD_PTR_MASK;
+ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_WR_PTR, (hw_read_ptr));
+
+ dev_dbg(priv->dev, "RX hw ptr %02d, sw ptr %2d\n",
+ hw_read_ptr, rx->wr_ptr);
+
+ /* Kick RX if it was stopped due to ring full condition */
+ if (rx->stopped) {
+ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY,
+ RX_CMD_READY);
+ rx->stopped = false;
+ }
+}
+
+static void aspeed_mctp_rx_chan_init(struct mctp_channel *rx)
+{
+ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx);
+ u32 *rx_cmd = (u32 *)rx->cmd.vaddr;
+ u32 data_size = sizeof(struct mctp_pcie_packet_data);
+ u32 hw_rx_count = RX_PACKET_COUNT;
+ int i;
+
+ for (i = 0; i < RX_PACKET_COUNT; i++) {
+ *rx_cmd = RX_DATA_ADDR(rx->data.dma_handle + data_size * i);
+ *rx_cmd |= RX_INTERRUPT_AFTER_CMD;
+ rx_cmd++;
+ }
+
+ rx->buffer_count = RX_PACKET_COUNT;
+
+ /*
+ * TODO: Once the read pointer runaway bug is fixed in a future AST2x00
+ * stepping, add chip revision detection and enable this workaround
+ * only when needed.
+ */
+ priv->rx_runaway_wa.enable = true;
+
+ /*
+ * The hardware does not wrap around ASPEED_MCTP_RX_BUF_SIZE
+ * correctly - we have to set the number of buffers to n/4 - 1.
+ */
+ if (priv->rx_runaway_wa.enable)
+ hw_rx_count = (RX_PACKET_COUNT / 4 - 1);
+
+ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_SIZE, hw_rx_count);
+}
+
+static void aspeed_mctp_tx_chan_init(struct mctp_channel *tx)
+{
+ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx);
+
+ tx->wr_ptr = 0;
+ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER, 0);
+ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_SIZE, TX_PACKET_COUNT);
+ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, 0);
+ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_ADDR, tx->cmd.dma_handle);
+}
+
+struct mctp_client *aspeed_mctp_create_client(struct aspeed_mctp *priv)
+{
+ struct mctp_client *client;
+
+ client = aspeed_mctp_client_alloc(priv);
+ if (!client)
+ return NULL;
+
+ init_waitqueue_head(&client->wait_queue);
+
+ spin_lock_bh(&priv->clients_lock);
+ list_add_tail(&client->link, &priv->clients);
+ spin_unlock_bh(&priv->clients_lock);
+
+ return client;
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_create_client);
+
+static int aspeed_mctp_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *misc = file->private_data;
+ struct platform_device *pdev = to_platform_device(misc->parent);
+ struct aspeed_mctp *priv = platform_get_drvdata(pdev);
+ struct mctp_client *client;
+
+ client = aspeed_mctp_create_client(priv);
+ if (!client)
+ return -ENOMEM;
+
+ file->private_data = client;
+
+ return 0;
+}
+
+void aspeed_mctp_delete_client(struct mctp_client *client)
+{
+ struct aspeed_mctp *priv = client->priv;
+ struct mctp_type_handler *handler, *tmp;
+
+ spin_lock_bh(&priv->clients_lock);
+
+ list_del(&client->link);
+
+ if (priv->default_client == client)
+ priv->default_client = NULL;
+
+ list_for_each_entry_safe(handler, tmp, &priv->mctp_type_handlers,
+ link) {
+ if (handler->client == client) {
+ list_del(&handler->link);
+ kfree(handler);
+ }
+ }
+ spin_unlock_bh(&priv->clients_lock);
+
+ /* Disable the tasklet to appease lockdep */
+ local_bh_disable();
+ aspeed_mctp_client_put(client);
+ local_bh_enable();
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_delete_client);
+
+static int aspeed_mctp_release(struct inode *inode, struct file *file)
+{
+ struct mctp_client *client = file->private_data;
+
+ aspeed_mctp_delete_client(client);
+
+ return 0;
+}
+
+static u16 _get_bdf(struct aspeed_mctp *priv)
+{
+ u16 bdf;
+
+ bdf = READ_ONCE(priv->pcie.bdf);
+ smp_rmb(); /* pairs with the smp_wmb() in _set_bdf() */
+
+ return bdf;
+}
+
+static void _set_bdf(struct aspeed_mctp *priv, u16 bdf)
+{
+ smp_wmb(); /* pairs with the smp_rmb() in _get_bdf() */
+ WRITE_ONCE(priv->pcie.bdf, bdf);
+}
+
+#define LEN_MASK_HI GENMASK(9, 8)
+#define LEN_MASK_LO GENMASK(7, 0)
+#define PCI_VDM_HDR_LEN_MASK_LO GENMASK(31, 24)
+#define PCI_VDM_HDR_LEN_MASK_HI GENMASK(17, 16)
+#define PCIE_VDM_HDR_REQUESTER_BDF_MASK GENMASK(31, 16)
+
+int aspeed_mctp_send_packet(struct mctp_client *client,
+ struct mctp_pcie_packet *packet)
+{
+ struct aspeed_mctp *priv = client->priv;
+ u32 *hdr_dw = (u32 *)packet->data.hdr;
+ u8 *hdr = (u8 *)packet->data.hdr;
+ u16 packet_data_sz_dw;
+ u16 pci_data_len_dw;
+ int ret;
+ u16 bdf;
+
+ bdf = _get_bdf(priv);
+ if (bdf == 0)
+ return -EIO;
+
+ /*
+ * If the data size differs from the length encoded in the PCIe VDM
+ * header, aspeed_mctp_tx_cmd will be programmed incorrectly. This
+ * may cause the MCTP HW to stop working.
+ */
+ pci_data_len_dw = FIELD_PREP(LEN_MASK_LO, FIELD_GET(PCI_VDM_HDR_LEN_MASK_LO, hdr_dw[0])) |
+ FIELD_PREP(LEN_MASK_HI, FIELD_GET(PCI_VDM_HDR_LEN_MASK_HI, hdr_dw[0]));
+ if (pci_data_len_dw == 0) /* According to PCIe Spec, 0 means 1024 DW */
+ pci_data_len_dw = SZ_1K;
+
+ packet_data_sz_dw = packet->size / sizeof(u32) - sizeof(packet->data.hdr) / sizeof(u32);
+ if (packet_data_sz_dw != pci_data_len_dw)
+ return -EINVAL;
+
+ be32p_replace_bits(&hdr_dw[1], bdf, PCIE_VDM_HDR_REQUESTER_BDF_MASK);
+
+ /*
+ * XXX Don't update EID for MCTP Control messages - old EID may
+ * interfere with MCTP discovery flow.
+ */
+ if (priv->eid && hdr[MCTP_HDR_TYPE_OFFSET] != MCTP_HDR_TYPE_CONTROL)
+ hdr[MCTP_HDR_SRC_EID_OFFSET] = priv->eid;
+
+ ret = ptr_ring_produce_bh(&client->tx_queue, packet);
+ if (!ret)
+ tasklet_hi_schedule(&priv->tx.tasklet);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_send_packet);
+
+struct mctp_pcie_packet *aspeed_mctp_receive_packet(struct mctp_client *client,
+ unsigned long timeout)
+{
+ struct aspeed_mctp *priv = client->priv;
+ u16 bdf = _get_bdf(priv);
+ int ret;
+
+ if (bdf == 0)
+ return ERR_PTR(-EIO);
+
+ ret = wait_event_interruptible_timeout(client->wait_queue,
+ __ptr_ring_peek(&client->rx_queue),
+ timeout);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ else if (ret == 0)
+ return ERR_PTR(-ETIME);
+
+ return ptr_ring_consume_bh(&client->rx_queue);
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_receive_packet);
+
+void aspeed_mctp_flush_rx_queue(struct mctp_client *client)
+{
+ struct mctp_pcie_packet *packet;
+
+ while ((packet = ptr_ring_consume_bh(&client->rx_queue)))
+ aspeed_mctp_packet_free(packet);
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_flush_rx_queue);
+
+static ssize_t aspeed_mctp_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mctp_client *client = file->private_data;
+ struct aspeed_mctp *priv = client->priv;
+ struct mctp_pcie_packet *rx_packet;
+ u16 bdf;
+
+ if (count < PCIE_MCTP_MIN_PACKET_SIZE)
+ return -EINVAL;
+
+ bdf = _get_bdf(priv);
+ if (bdf == 0)
+ return -EIO;
+
+ if (count > sizeof(rx_packet->data))
+ count = sizeof(rx_packet->data);
+
+ rx_packet = ptr_ring_consume_bh(&client->rx_queue);
+ if (!rx_packet)
+ return -EAGAIN;
+
+ if (copy_to_user(buf, &rx_packet->data, count)) {
+ dev_err(priv->dev, "copy to user failed\n");
+ count = -EFAULT;
+ }
+
+ aspeed_mctp_packet_free(rx_packet);
+
+ return count;
+}
+
+static void aspeed_mctp_flush_tx_queue(struct mctp_client *client)
+{
+ struct mctp_pcie_packet *packet;
+
+ while ((packet = ptr_ring_consume_bh(&client->tx_queue)))
+ aspeed_mctp_packet_free(packet);
+}
+
+static void aspeed_mctp_flush_all_tx_queues(struct aspeed_mctp *priv)
+{
+ struct mctp_client *client;
+
+ spin_lock_bh(&priv->clients_lock);
+ list_for_each_entry(client, &priv->clients, link)
+ aspeed_mctp_flush_tx_queue(client);
+ spin_unlock_bh(&priv->clients_lock);
+}
+
+static ssize_t aspeed_mctp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mctp_client *client = file->private_data;
+ struct aspeed_mctp *priv = client->priv;
+ struct mctp_pcie_packet *tx_packet;
+ int ret;
+
+ if (count < PCIE_MCTP_MIN_PACKET_SIZE)
+ return -EINVAL;
+
+ if (count > sizeof(tx_packet->data))
+ return -ENOSPC;
+
+ tx_packet = aspeed_mctp_packet_alloc(GFP_KERNEL);
+ if (!tx_packet) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(&tx_packet->data, buf, count)) {
+ dev_err(priv->dev, "copy from user failed\n");
+ ret = -EFAULT;
+ goto out_packet;
+ }
+
+ tx_packet->size = count;
+
+ ret = aspeed_mctp_send_packet(client, tx_packet);
+ if (ret)
+ goto out_packet;
+
+ return count;
+
+out_packet:
+ aspeed_mctp_packet_free(tx_packet);
+out:
+ return ret;
+}
+
+int aspeed_mctp_add_type_handler(struct mctp_client *client, u8 mctp_type,
+ u16 pci_vendor_id, u16 vdm_type, u16 vdm_mask)
+{
+ struct aspeed_mctp *priv = client->priv;
+ struct mctp_type_handler *handler, *new_handler;
+ int ret = 0;
+
+ if (mctp_type <= MCTP_HDR_TYPE_BASE_LAST) {
+ /* Vendor, type and type mask must be zero for types 0-5 */
+ if (pci_vendor_id != 0 || vdm_type != 0 || vdm_mask != 0)
+ return -EINVAL;
+ } else if (mctp_type == MCTP_HDR_TYPE_VDM_PCI) {
+ /* For the Vendor Defined PCI type, the vendor ID must be nonzero */
+ if (pci_vendor_id == 0 || pci_vendor_id == 0xffff)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ new_handler = kzalloc(sizeof(*new_handler), GFP_KERNEL);
+ if (!new_handler)
+ return -ENOMEM;
+ new_handler->mctp_type = mctp_type;
+ new_handler->pci_vendor_id = pci_vendor_id;
+ new_handler->vdm_type = vdm_type & vdm_mask;
+ new_handler->vdm_mask = vdm_mask;
+ new_handler->client = client;
+
+ spin_lock_bh(&priv->clients_lock);
+ list_for_each_entry(handler, &priv->mctp_type_handlers, link) {
+ if (handler->mctp_type == new_handler->mctp_type &&
+ handler->pci_vendor_id == new_handler->pci_vendor_id &&
+ handler->vdm_type == new_handler->vdm_type) {
+ if (handler->client != new_handler->client)
+ ret = -EBUSY;
+ kfree(new_handler);
+ goto out_unlock;
+ }
+ }
+ list_add_tail(&new_handler->link, &priv->mctp_type_handlers);
+out_unlock:
+ spin_unlock_bh(&priv->clients_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_add_type_handler);
+
+int aspeed_mctp_remove_type_handler(struct mctp_client *client,
+ u8 mctp_type, u16 pci_vendor_id,
+ u16 vdm_type, u16 vdm_mask)
+{
+ struct aspeed_mctp *priv = client->priv;
+ struct mctp_type_handler *handler, *tmp;
+ int ret = -EINVAL;
+
+ vdm_type &= vdm_mask;
+
+ spin_lock_bh(&priv->clients_lock);
+ list_for_each_entry_safe(handler, tmp, &priv->mctp_type_handlers,
+ link) {
+ if (handler->client == client &&
+ handler->mctp_type == mctp_type &&
+ handler->pci_vendor_id == pci_vendor_id &&
+ handler->vdm_type == vdm_type) {
+ list_del(&handler->link);
+ kfree(handler);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&priv->clients_lock);
+ return ret;
+}
+
+static int aspeed_mctp_register_default_handler(struct mctp_client *client)
+{
+ struct aspeed_mctp *priv = client->priv;
+ int ret = 0;
+
+ spin_lock_bh(&priv->clients_lock);
+
+ if (!priv->default_client)
+ priv->default_client = client;
+ else if (priv->default_client != client)
+ ret = -EBUSY;
+
+ spin_unlock_bh(&priv->clients_lock);
+
+ return ret;
+}
+
+static int
+aspeed_mctp_register_type_handler(struct mctp_client *client,
+ void __user *userbuf)
+{
+ struct aspeed_mctp *priv = client->priv;
+ struct aspeed_mctp_type_handler_ioctl handler;
+
+ if (copy_from_user(&handler, userbuf, sizeof(handler))) {
+ dev_err(priv->dev, "copy from user failed\n");
+ return -EFAULT;
+ }
+
+ return aspeed_mctp_add_type_handler(client, handler.mctp_type,
+ handler.pci_vendor_id,
+ handler.vendor_type,
+ handler.vendor_type_mask);
+}
+
+static int
+aspeed_mctp_unregister_type_handler(struct mctp_client *client,
+ void __user *userbuf)
+{
+ struct aspeed_mctp *priv = client->priv;
+ struct aspeed_mctp_type_handler_ioctl handler;
+
+ if (copy_from_user(&handler, userbuf, sizeof(handler))) {
+ dev_err(priv->dev, "copy from user failed\n");
+ return -EFAULT;
+ }
+
+ return aspeed_mctp_remove_type_handler(client, handler.mctp_type,
+ handler.pci_vendor_id,
+ handler.vendor_type,
+ handler.vendor_type_mask);
+}
+
+static int
+aspeed_mctp_filter_eid(struct aspeed_mctp *priv, void __user *userbuf)
+{
+ struct aspeed_mctp_filter_eid eid;
+
+ if (copy_from_user(&eid, userbuf, sizeof(eid))) {
+ dev_err(priv->dev, "copy from user failed\n");
+ return -EFAULT;
+ }
+
+ if (eid.enable) {
+ regmap_write(priv->map, ASPEED_MCTP_EID, eid.eid);
+ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL,
+ MATCHING_EID, MATCHING_EID);
+ } else {
+ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL,
+ MATCHING_EID, 0);
+ }
+ return 0;
+}
+
+static int aspeed_mctp_get_bdf(struct aspeed_mctp *priv, void __user *userbuf)
+{
+ struct aspeed_mctp_get_bdf bdf = { _get_bdf(priv) };
+
+ if (copy_to_user(userbuf, &bdf, sizeof(bdf))) {
+ dev_err(priv->dev, "copy to user failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int
+aspeed_mctp_get_medium_id(struct aspeed_mctp *priv, void __user *userbuf)
+{
+ struct aspeed_mctp_get_medium_id id = { 0x09 }; /* PCIe revision 2.0 */
+
+ if (copy_to_user(userbuf, &id, sizeof(id))) {
+ dev_err(priv->dev, "copy to user failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int
+aspeed_mctp_get_mtu(struct aspeed_mctp *priv, void __user *userbuf)
+{
+ struct aspeed_mctp_get_mtu id = { ASPEED_MCTP_MTU };
+
+ if (copy_to_user(userbuf, &id, sizeof(id))) {
+ dev_err(priv->dev, "copy to user failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int aspeed_mctp_get_eid_bdf(struct mctp_client *client, u8 eid, u16 *bdf)
+{
+ struct aspeed_mctp_endpoint *endpoint;
+ int ret = -ENOENT;
+
+ mutex_lock(&client->priv->endpoints_lock);
+ list_for_each_entry(endpoint, &client->priv->endpoints, link) {
+ if (endpoint->data.eid_info.eid == eid) {
+ *bdf = endpoint->data.eid_info.bdf;
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&client->priv->endpoints_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_get_eid_bdf);
+
+int aspeed_mctp_get_eid(struct mctp_client *client, u16 bdf,
+ u8 domain_id, u8 *eid)
+{
+ struct aspeed_mctp_endpoint *endpoint;
+ int ret = -ENOENT;
+
+ mutex_lock(&client->priv->endpoints_lock);
+
+ list_for_each_entry(endpoint, &client->priv->endpoints, link) {
+ if (endpoint->data.eid_ext_info.domain_id == domain_id &&
+ endpoint->data.eid_ext_info.bdf == bdf) {
+ *eid = endpoint->data.eid_ext_info.eid;
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&client->priv->endpoints_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(aspeed_mctp_get_eid);
+
+static int
+aspeed_mctp_get_eid_info(struct aspeed_mctp *priv, void __user *userbuf,
+ enum mctp_address_type addr_format)
+{
+ int count = 0;
+ int ret = 0;
+ struct aspeed_mctp_get_eid_info get_eid;
+ struct aspeed_mctp_endpoint *endpoint;
+ void *user_ptr;
+ size_t count_to_copy;
+
+ if (copy_from_user(&get_eid, userbuf, sizeof(get_eid))) {
+ dev_err(priv->dev, "copy from user failed\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&priv->endpoints_lock);
+
+ if (get_eid.count == 0) {
+ count = priv->endpoints_count;
+ goto out_unlock;
+ }
+
+ user_ptr = u64_to_user_ptr(get_eid.ptr);
+ count_to_copy = get_eid.count > priv->endpoints_count ?
+ priv->endpoints_count : get_eid.count;
+ list_for_each_entry(endpoint, &priv->endpoints, link) {
+ if (endpoint->data.eid_info.eid < get_eid.start_eid)
+ continue;
+ if (count >= count_to_copy)
+ break;
+
+ if (addr_format == ASPEED_MCTP_EXTENDED_ADDR_FORMAT)
+ ret = copy_to_user(&(((struct aspeed_mctp_eid_ext_info *)
+ user_ptr)[count]),
+ &endpoint->data,
+ sizeof(struct aspeed_mctp_eid_ext_info));
+ else
+ ret = copy_to_user(&(((struct aspeed_mctp_eid_info *)
+ user_ptr)[count]),
+ &endpoint->data,
+ sizeof(struct aspeed_mctp_eid_info));
+
+ if (ret) {
+ dev_err(priv->dev, "copy to user failed\n");
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ count++;
+ }
+
+out_unlock:
+ get_eid.count = count;
+ if (copy_to_user(userbuf, &get_eid, sizeof(get_eid))) {
+ dev_err(priv->dev, "copy to user failed\n");
+ ret = -EFAULT;
+ }
+
+ mutex_unlock(&priv->endpoints_lock);
+ return ret;
+}
+
+static int
+eid_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
+{
+ struct aspeed_mctp_endpoint *endpoint_a;
+ struct aspeed_mctp_endpoint *endpoint_b;
+
+ if (a == b)
+ return 0;
+
+ endpoint_a = list_entry(a, typeof(*endpoint_a), link);
+ endpoint_b = list_entry(b, typeof(*endpoint_b), link);
+
+ if (endpoint_a->data.eid_info.eid < endpoint_b->data.eid_info.eid)
+ return -1;
+ else if (endpoint_a->data.eid_info.eid > endpoint_b->data.eid_info.eid)
+ return 1;
+
+ return 0;
+}
+
+static void aspeed_mctp_eid_info_list_remove(struct list_head *list)
+{
+ struct aspeed_mctp_endpoint *endpoint;
+ struct aspeed_mctp_endpoint *tmp;
+
+ list_for_each_entry_safe(endpoint, tmp, list, link) {
+ list_del(&endpoint->link);
+ kfree(endpoint);
+ }
+}
+
+static bool
+aspeed_mctp_eid_info_list_valid(struct list_head *list)
+{
+ struct aspeed_mctp_endpoint *endpoint;
+ struct aspeed_mctp_endpoint *next;
+
+ list_for_each_entry(endpoint, list, link) {
+ next = list_next_entry(endpoint, link);
+ if (&next->link == list)
+ break;
+
+ /* duplicated EIDs */
+ if (next->data.eid_info.eid == endpoint->data.eid_info.eid)
+ return false;
+ }
+
+ return true;
+}
+
+static int
+aspeed_mctp_set_eid_info(struct aspeed_mctp *priv, void __user *userbuf,
+ enum mctp_address_type addr_format)
+{
+ struct list_head list = LIST_HEAD_INIT(list);
+ struct aspeed_mctp_set_eid_info set_eid;
+ void *user_ptr;
+ struct aspeed_mctp_endpoint *endpoint;
+ int ret = 0;
+ u8 eid = 0;
+ size_t i;
+
+ if (copy_from_user(&set_eid, userbuf, sizeof(set_eid))) {
+ dev_err(priv->dev, "copy from user failed\n");
+ return -EFAULT;
+ }
+
+ if (set_eid.count > ASPEED_MCTP_EID_INFO_MAX)
+ return -EINVAL;
+
+ user_ptr = u64_to_user_ptr(set_eid.ptr);
+ for (i = 0; i < set_eid.count; i++) {
+ endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
+ if (!endpoint) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (addr_format == ASPEED_MCTP_EXTENDED_ADDR_FORMAT)
+ ret = copy_from_user(&endpoint->data,
+ &(((struct aspeed_mctp_eid_ext_info *)
+ user_ptr)[i]),
+ sizeof(struct aspeed_mctp_eid_ext_info));
+ else
+ ret = copy_from_user(&endpoint->data,
+ &(((struct aspeed_mctp_eid_info *)
+ user_ptr)[i]),
+ sizeof(struct aspeed_mctp_eid_info));
+
+ if (ret) {
+ dev_err(priv->dev, "copy from user failed\n");
+ kfree(endpoint);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Detect self EID */
+ if (_get_bdf(priv) == endpoint->data.eid_info.bdf) {
+ /*
+ * XXX Use the smallest EID with a matching BDF.
+ * On some platforms there could be multiple endpoints
+ * with the same BDF in the routing table.
+ */
+ if (eid == 0 || endpoint->data.eid_info.eid < eid)
+ eid = endpoint->data.eid_info.eid;
+ }
+
+ list_add_tail(&endpoint->link, &list);
+ }
+
+ list_sort(NULL, &list, eid_info_cmp);
+ if (!aspeed_mctp_eid_info_list_valid(&list)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&priv->endpoints_lock);
+ if (list_empty(&priv->endpoints))
+ list_splice_init(&list, &priv->endpoints);
+ else
+ list_swap(&list, &priv->endpoints);
+ priv->endpoints_count = set_eid.count;
+ priv->eid = eid;
+ mutex_unlock(&priv->endpoints_lock);
+out:
+ aspeed_mctp_eid_info_list_remove(&list);
+ return ret;
+}
+
+static long
+aspeed_mctp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct mctp_client *client = file->private_data;
+ struct aspeed_mctp *priv = client->priv;
+ void __user *userbuf = (void __user *)arg;
+ int ret;
+
+ switch (cmd) {
+ case ASPEED_MCTP_IOCTL_FILTER_EID:
+ ret = aspeed_mctp_filter_eid(priv, userbuf);
+ break;
+
+ case ASPEED_MCTP_IOCTL_GET_BDF:
+ ret = aspeed_mctp_get_bdf(priv, userbuf);
+ break;
+
+ case ASPEED_MCTP_IOCTL_GET_MEDIUM_ID:
+ ret = aspeed_mctp_get_medium_id(priv, userbuf);
+ break;
+
+ case ASPEED_MCTP_IOCTL_GET_MTU:
+ ret = aspeed_mctp_get_mtu(priv, userbuf);
+ break;
+
+ case ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER:
+ ret = aspeed_mctp_register_default_handler(client);
+ break;
+
+ case ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER:
+ ret = aspeed_mctp_register_type_handler(client, userbuf);
+ break;
+
+ case ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER:
+ ret = aspeed_mctp_unregister_type_handler(client, userbuf);
+ break;
+
+ case ASPEED_MCTP_IOCTL_GET_EID_INFO:
+ ret = aspeed_mctp_get_eid_info(priv, userbuf, ASPEED_MCTP_GENERIC_ADDR_FORMAT);
+ break;
+
+ case ASPEED_MCTP_IOCTL_GET_EID_EXT_INFO:
+ ret = aspeed_mctp_get_eid_info(priv, userbuf, ASPEED_MCTP_EXTENDED_ADDR_FORMAT);
+ break;
+
+ case ASPEED_MCTP_IOCTL_SET_EID_INFO:
+ ret = aspeed_mctp_set_eid_info(priv, userbuf, ASPEED_MCTP_GENERIC_ADDR_FORMAT);
+ break;
+
+ case ASPEED_MCTP_IOCTL_SET_EID_EXT_INFO:
+ ret = aspeed_mctp_set_eid_info(priv, userbuf, ASPEED_MCTP_EXTENDED_ADDR_FORMAT);
+ break;
+
+ default:
+ dev_err(priv->dev, "Command not found\n");
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static __poll_t aspeed_mctp_poll(struct file *file,
+ struct poll_table_struct *pt)
+{
+ struct mctp_client *client = file->private_data;
+ __poll_t ret = 0;
+
+ poll_wait(file, &client->wait_queue, pt);
+
+ if (!ptr_ring_full_bh(&client->tx_queue))
+ ret |= EPOLLOUT;
+
+ if (__ptr_ring_peek(&client->rx_queue))
+ ret |= EPOLLIN;
+
+ return ret;
+}
+
+static const struct file_operations aspeed_mctp_fops = {
+ .owner = THIS_MODULE,
+ .open = aspeed_mctp_open,
+ .release = aspeed_mctp_release,
+ .read = aspeed_mctp_read,
+ .write = aspeed_mctp_write,
+ .unlocked_ioctl = aspeed_mctp_ioctl,
+ .poll = aspeed_mctp_poll,
+};
+
+static const struct regmap_config aspeed_mctp_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ASPEED_MCTP_TX_BUF_WR_PTR,
+};
+
+struct device_type aspeed_mctp_type = {
+ .name = "aspeed-mctp",
+};
+
+static struct miscdevice aspeed_mctp_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "aspeed-mctp",
+ .fops = &aspeed_mctp_fops,
+};
+
+static void aspeed_mctp_send_pcie_uevent(struct kobject *kobj, bool ready)
+{
+ char *pcie_not_ready_event[] = { ASPEED_MCTP_READY "=0", NULL };
+ char *pcie_ready_event[] = { ASPEED_MCTP_READY "=1", NULL };
+
+ kobject_uevent_env(kobj, KOBJ_CHANGE,
+ ready ? pcie_ready_event : pcie_not_ready_event);
+}
+
+static u16 aspeed_mctp_pcie_setup(struct aspeed_mctp *priv)
+{
+ u32 reg;
+ u16 bdf;
+
+ regmap_read(priv->pcie.map, ASPEED_PCIE_MISC_STS_1, &reg);
+
+ bdf = PCI_DEVID(GET_PCI_BUS_NUM(reg), GET_PCI_DEV_NUM(reg));
+ if (bdf != 0)
+ cancel_delayed_work(&priv->pcie.rst_dwork);
+ else
+ schedule_delayed_work(&priv->pcie.rst_dwork,
+ msecs_to_jiffies(1000));
+
+ return bdf;
+}
+
+static void aspeed_mctp_irq_enable(struct aspeed_mctp *priv)
+{
+ u32 enable = TX_CMD_SENT_INT | TX_CMD_WRONG_INT |
+ RX_CMD_RECEIVE_INT | RX_CMD_NO_MORE_INT;
+
+ regmap_write(priv->map, ASPEED_MCTP_INT_EN, enable);
+}
+
+static void aspeed_mctp_irq_disable(struct aspeed_mctp *priv)
+{
+ regmap_write(priv->map, ASPEED_MCTP_INT_EN, 0);
+}
+
+static void aspeed_mctp_reset_work(struct work_struct *work)
+{
+ struct aspeed_mctp *priv = container_of(work, typeof(*priv),
+ pcie.rst_dwork.work);
+ struct kobject *kobj = &aspeed_mctp_miscdev.this_device->kobj;
+ u16 bdf;
+
+ if (priv->pcie.need_uevent) {
+ aspeed_mctp_send_pcie_uevent(kobj, false);
+ priv->pcie.need_uevent = false;
+ }
+
+ bdf = aspeed_mctp_pcie_setup(priv);
+ if (bdf) {
+ aspeed_mctp_flush_all_tx_queues(priv);
+ aspeed_mctp_irq_enable(priv);
+ aspeed_mctp_rx_trigger(&priv->rx);
+ _set_bdf(priv, bdf);
+ aspeed_mctp_send_pcie_uevent(kobj, true);
+ }
+}
+
+static void aspeed_mctp_channels_init(struct aspeed_mctp *priv)
+{
+ aspeed_mctp_rx_chan_init(&priv->rx);
+ aspeed_mctp_tx_chan_init(&priv->tx);
+}
+
+static irqreturn_t aspeed_mctp_irq_handler(int irq, void *arg)
+{
+ struct aspeed_mctp *priv = arg;
+ u32 handled = 0;
+ u32 status;
+
+ regmap_read(priv->map, ASPEED_MCTP_INT_STS, &status);
+ regmap_write(priv->map, ASPEED_MCTP_INT_STS, status);
+
+ if (status & TX_CMD_SENT_INT) {
+ tasklet_hi_schedule(&priv->tx.tasklet);
+
+ handled |= TX_CMD_SENT_INT;
+ }
+
+ if (status & TX_CMD_WRONG_INT) {
+ /* TODO: print the actual command */
+ dev_warn(priv->dev, "TX wrong");
+
+ handled |= TX_CMD_WRONG_INT;
+ }
+
+ if (status & RX_CMD_RECEIVE_INT) {
+ tasklet_hi_schedule(&priv->rx.tasklet);
+
+ handled |= RX_CMD_RECEIVE_INT;
+ }
+
+ if (status & RX_CMD_NO_MORE_INT) {
+ dev_dbg(priv->dev, "RX full");
+ priv->rx.stopped = true;
+ tasklet_hi_schedule(&priv->rx.tasklet);
+
+ handled |= RX_CMD_NO_MORE_INT;
+ }
+
+ if (!handled)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t aspeed_mctp_pcie_rst_irq_handler(int irq, void *arg)
+{
+ struct aspeed_mctp *priv = arg;
+
+ aspeed_mctp_channels_init(priv);
+
+ priv->pcie.need_uevent = true;
+ _set_bdf(priv, 0);
+ priv->eid = 0;
+
+ schedule_delayed_work(&priv->pcie.rst_dwork, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void aspeed_mctp_drv_init(struct aspeed_mctp *priv)
+{
+ INIT_LIST_HEAD(&priv->clients);
+ INIT_LIST_HEAD(&priv->mctp_type_handlers);
+ INIT_LIST_HEAD(&priv->endpoints);
+
+ spin_lock_init(&priv->clients_lock);
+ mutex_init(&priv->endpoints_lock);
+
+ INIT_DELAYED_WORK(&priv->pcie.rst_dwork, aspeed_mctp_reset_work);
+
+ tasklet_init(&priv->tx.tasklet, aspeed_mctp_tx_tasklet,
+ (unsigned long)&priv->tx);
+ tasklet_init(&priv->rx.tasklet, aspeed_mctp_rx_tasklet,
+ (unsigned long)&priv->rx);
+}
+
+static void aspeed_mctp_drv_fini(struct aspeed_mctp *priv)
+{
+ aspeed_mctp_eid_info_list_remove(&priv->endpoints);
+ tasklet_disable(&priv->tx.tasklet);
+ tasklet_kill(&priv->tx.tasklet);
+ tasklet_disable(&priv->rx.tasklet);
+ tasklet_kill(&priv->rx.tasklet);
+
+ cancel_delayed_work_sync(&priv->pcie.rst_dwork);
+}
+
+static int aspeed_mctp_resources_init(struct aspeed_mctp *priv)
+{
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(priv->dev, "Failed to get regmap!\n");
+ return PTR_ERR(regs);
+ }
+
+ priv->map = devm_regmap_init_mmio(priv->dev, regs,
+ &aspeed_mctp_regmap_cfg);
+ if (IS_ERR(priv->map))
+ return PTR_ERR(priv->map);
+
+ priv->reset = devm_reset_control_get(priv->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ dev_err(priv->dev, "Failed to get reset!\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ priv->pcie.map =
+ syscon_regmap_lookup_by_phandle(priv->dev->of_node,
+ "aspeed,pcieh");
+ if (IS_ERR(priv->pcie.map)) {
+ dev_err(priv->dev, "Failed to find PCIe Host regmap!\n");
+ return PTR_ERR(priv->pcie.map);
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int aspeed_mctp_dma_init(struct aspeed_mctp *priv)
+{
+ struct mctp_channel *tx = &priv->tx;
+ struct mctp_channel *rx = &priv->rx;
+ int ret = -ENOMEM;
+
+ BUILD_BUG_ON(TX_PACKET_COUNT >= TX_MAX_PACKET_COUNT);
+ BUILD_BUG_ON(RX_PACKET_COUNT >= RX_MAX_PACKET_COUNT);
+
+ tx->cmd.vaddr = dma_alloc_coherent(priv->dev, TX_CMD_BUF_SIZE,
+ &tx->cmd.dma_handle, GFP_KERNEL);
+
+ if (!tx->cmd.vaddr)
+ return ret;
+
+ tx->data.vaddr = dma_alloc_coherent(priv->dev, TX_DATA_BUF_SIZE,
+ &tx->data.dma_handle, GFP_KERNEL);
+
+ if (!tx->data.vaddr)
+ goto out_tx_data;
+
+ rx->cmd.vaddr = dma_alloc_coherent(priv->dev, RX_CMD_BUF_SIZE,
+ &rx->cmd.dma_handle, GFP_KERNEL);
+
+ if (!rx->cmd.vaddr)
+ goto out_tx_cmd;
+
+ rx->data.vaddr = dma_alloc_coherent(priv->dev, RX_DATA_BUF_SIZE,
+ &rx->data.dma_handle, GFP_KERNEL);
+
+ if (!rx->data.vaddr)
+ goto out_rx_data;
+
+ return 0;
+out_rx_data:
+ dma_free_coherent(priv->dev, RX_CMD_BUF_SIZE, rx->cmd.vaddr,
+ rx->cmd.dma_handle);
+
+out_tx_cmd:
+ dma_free_coherent(priv->dev, TX_DATA_BUF_SIZE, tx->data.vaddr,
+ tx->data.dma_handle);
+
+out_tx_data:
+ dma_free_coherent(priv->dev, TX_CMD_BUF_SIZE, tx->cmd.vaddr,
+ tx->cmd.dma_handle);
+ return ret;
+}
+
+static void aspeed_mctp_dma_fini(struct aspeed_mctp *priv)
+{
+ struct mctp_channel *tx = &priv->tx;
+ struct mctp_channel *rx = &priv->rx;
+
+ dma_free_coherent(priv->dev, TX_CMD_BUF_SIZE, tx->cmd.vaddr,
+ tx->cmd.dma_handle);
+
+ dma_free_coherent(priv->dev, RX_CMD_BUF_SIZE, rx->cmd.vaddr,
+ rx->cmd.dma_handle);
+
+ dma_free_coherent(priv->dev, TX_DATA_BUF_SIZE, tx->data.vaddr,
+ tx->data.dma_handle);
+
+ dma_free_coherent(priv->dev, RX_DATA_BUF_SIZE, rx->data.vaddr,
+ rx->data.dma_handle);
+}
+
+static int aspeed_mctp_irq_init(struct aspeed_mctp *priv)
+{
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(priv->dev, irq, aspeed_mctp_irq_handler,
+ IRQF_SHARED, "aspeed-mctp", priv);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(priv->dev, irq,
+ aspeed_mctp_pcie_rst_irq_handler,
+ IRQF_SHARED, "aspeed-mctp", priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void aspeed_mctp_hw_reset(struct aspeed_mctp *priv)
+{
+ u32 reg;
+
+ /*
+ * XXX: We need to skip the reset when we probe multiple times.
+ * Currently calling reset more than once seems to make the HW upset,
+ * however, we do need to reset once after the first boot before we're
+ * able to use the HW.
+ */
+ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_ADDR, &reg);
+
+ if (reg) {
+ dev_info(priv->dev,
+ "Already initialized - skipping hardware reset\n");
+ return;
+ }
+
+ if (reset_control_assert(priv->reset) != 0)
+ dev_warn(priv->dev, "Failed to assert reset\n");
+
+ if (reset_control_deassert(priv->reset) != 0)
+ dev_warn(priv->dev, "Failed to deassert reset\n");
+}
+
+static int aspeed_mctp_probe(struct platform_device *pdev)
+{
+ struct aspeed_mctp *priv;
+ int ret;
+ u16 bdf;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ priv->dev = &pdev->dev;
+
+ aspeed_mctp_drv_init(priv);
+
+ ret = aspeed_mctp_resources_init(priv);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init resources\n");
+ goto out_drv;
+ }
+
+ ret = aspeed_mctp_dma_init(priv);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init DMA\n");
+ goto out_drv;
+ }
+
+ aspeed_mctp_hw_reset(priv);
+
+ aspeed_mctp_channels_init(priv);
+
+ aspeed_mctp_miscdev.parent = priv->dev;
+ ret = misc_register(&aspeed_mctp_miscdev);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register miscdev\n");
+ goto out_dma;
+ }
+ aspeed_mctp_miscdev.this_device->type = &aspeed_mctp_type;
+
+ ret = aspeed_mctp_irq_init(priv);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init IRQ!\n");
+ goto out_dma;
+ }
+
+ aspeed_mctp_irq_enable(priv);
+
+ bdf = aspeed_mctp_pcie_setup(priv);
+ if (bdf != 0)
+ _set_bdf(priv, bdf);
+
+ priv->peci_mctp =
+ platform_device_register_data(priv->dev, "peci-mctp",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(priv->peci_mctp))
+ dev_err(priv->dev, "Failed to register peci-mctp device\n");
+
+ aspeed_mctp_rx_trigger(&priv->rx);
+
+ return 0;
+
+out_dma:
+ aspeed_mctp_dma_fini(priv);
+out_drv:
+ aspeed_mctp_drv_fini(priv);
+out:
+ dev_err(&pdev->dev, "Failed to probe Aspeed MCTP: %d\n", ret);
+ return ret;
+}
+
+static int aspeed_mctp_remove(struct platform_device *pdev)
+{
+ struct aspeed_mctp *priv = platform_get_drvdata(pdev);
+
+ platform_device_unregister(priv->peci_mctp);
+
+ misc_deregister(&aspeed_mctp_miscdev);
+
+ aspeed_mctp_irq_disable(priv);
+
+ aspeed_mctp_dma_fini(priv);
+
+ aspeed_mctp_drv_fini(priv);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_mctp_match_table[] = {
+ { .compatible = "aspeed,ast2600-mctp" },
+ { }
+};
+
+static struct platform_driver aspeed_mctp_driver = {
+ .driver = {
+ .name = "aspeed-mctp",
+ .of_match_table = of_match_ptr(aspeed_mctp_match_table),
+ },
+ .probe = aspeed_mctp_probe,
+ .remove = aspeed_mctp_remove,
+};
+
+static int __init aspeed_mctp_init(void)
+{
+ packet_cache =
+ kmem_cache_create_usercopy("mctp-packet",
+ sizeof(struct mctp_pcie_packet),
+ 0, 0, 0,
+ sizeof(struct mctp_pcie_packet),
+ NULL);
+ if (!packet_cache)
+ return -ENOMEM;
+
+ return platform_driver_register(&aspeed_mctp_driver);
+}
+
+static void __exit aspeed_mctp_exit(void)
+{
+ platform_driver_unregister(&aspeed_mctp_driver);
+ kmem_cache_destroy(packet_cache);
+}
+
+module_init(aspeed_mctp_init);
+module_exit(aspeed_mctp_exit);
+
+MODULE_DEVICE_TABLE(of, aspeed_mctp_match_table);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
+MODULE_DESCRIPTION("Aspeed AST2600 MCTP driver");
diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
index ef8b24fd1851..8dc58340e5fd 100644
--- a/drivers/soc/aspeed/aspeed-uart-routing.c
+++ b/drivers/soc/aspeed/aspeed-uart-routing.c
@@ -31,6 +31,8 @@
#define UART_ROUTING_UART6 "uart6"
#define UART_ROUTING_UART10 "uart10"
#define UART_ROUTING_RES "reserved"
+#define UART_ROUTING_HICR9_RAW "hicr9"
+#define UART_ROUTING_HICRA_RAW "hicra"
struct aspeed_uart_routing {
struct regmap *map;
@@ -40,7 +42,7 @@ struct aspeed_uart_routing {
struct aspeed_uart_routing_selector {
struct device_attribute dev_attr;
uint8_t reg;
- uint8_t mask;
+ uint32_t mask;
uint8_t shift;
const char *const options[];
};
@@ -63,6 +65,22 @@ static ssize_t aspeed_uart_routing_store(struct device *dev,
.store = aspeed_uart_routing_store, \
}
+static struct aspeed_uart_routing_selector hicr9_raw_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_HICR9_RAW),
+ .reg = HICR9,
+ .shift = 0,
+ .mask = 0xffffffff,
+ .options = { NULL },
+};
+
+static struct aspeed_uart_routing_selector hicra_raw_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_HICRA_RAW),
+ .reg = HICRA,
+ .shift = 0,
+ .mask = 0xffffffff,
+ .options = { NULL },
+};
+
/* routing selector for AST25xx */
static struct aspeed_uart_routing_selector ast2500_io6_sel = {
.dev_attr = ROUTING_ATTR(UART_ROUTING_IO6),
@@ -278,6 +296,8 @@ static struct attribute *ast2500_uart_routing_attrs[] = {
&ast2500_io3_sel.dev_attr.attr,
&ast2500_io2_sel.dev_attr.attr,
&ast2500_io1_sel.dev_attr.attr,
+ &hicr9_raw_sel.dev_attr.attr,
+ &hicra_raw_sel.dev_attr.attr,
NULL,
};
@@ -482,6 +502,8 @@ static struct attribute *ast2600_uart_routing_attrs[] = {
&ast2600_io3_sel.dev_attr.attr,
&ast2600_io2_sel.dev_attr.attr,
&ast2600_io1_sel.dev_attr.attr,
+ &hicr9_raw_sel.dev_attr.attr,
+ &hicra_raw_sel.dev_attr.attr,
NULL,
};
@@ -495,22 +517,27 @@ static ssize_t aspeed_uart_routing_show(struct device *dev,
{
struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
- int val, pos, len;
+ int val, pos, len = 0;
regmap_read(uart_routing->map, sel->reg, &val);
val = (val >> sel->shift) & sel->mask;
- len = 0;
- for (pos = 0; sel->options[pos] != NULL; ++pos) {
- if (pos == val)
- len += sysfs_emit_at(buf, len, "[%s] ", sel->options[pos]);
- else
- len += sysfs_emit_at(buf, len, "%s ", sel->options[pos]);
+ if (sel == &hicr9_raw_sel || sel == &hicra_raw_sel) {
+ len += sysfs_emit_at(buf, len, "0x%08x", val);
+ } else {
+ for (pos = 0; sel->options[pos] != NULL; ++pos) {
+ if (pos == val)
+ len += sysfs_emit_at(buf, len, "[%s] ",
+ sel->options[pos]);
+ else
+ len += sysfs_emit_at(buf, len, "%s ",
+ sel->options[pos]);
+ }
+
+ if (val >= pos)
+ len += sysfs_emit_at(buf, len, "[unknown(%d)]", val);
}
- if (val >= pos)
- len += sysfs_emit_at(buf, len, "[unknown(%d)]", val);
-
len += sysfs_emit_at(buf, len, "\n");
return len;
@@ -522,12 +549,19 @@ static ssize_t aspeed_uart_routing_store(struct device *dev,
{
struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
- int val;
-
- val = match_string(sel->options, -1, buf);
- if (val < 0) {
- dev_err(dev, "invalid value \"%s\"\n", buf);
- return -EINVAL;
+ int val, res;
+ char end;
+
+ if (sel == &hicr9_raw_sel || sel == &hicra_raw_sel) {
+ res = sscanf(buf, "%i%c", &val, &end);
+ if (res < 1 || (res > 1 && end != '\n'))
+ return -EINVAL;
+ } else {
+ val = match_string(sel->options, -1, buf);
+ if (val < 0) {
+ dev_err(dev, "invalid value \"%s\"\n", buf);
+ return -EINVAL;
+ }
}
regmap_update_bits(uart_routing->map, sel->reg,
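The two raw selectors added above bypass the option strings and accept or emit the whole
register value. A small sketch of reading one of them from user space follows; the exact
sysfs path depends on the board's device tree and is an assumption here, and a value such
as "0x30000" could be written back through the same file (parsed by the sscanf("%i%c")
branch in the store handler).

/* Hypothetical sketch: read the raw HICR9 value via the new "hicr9" attribute. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/platform/drivers/aspeed-uart-routing/1e789098.uart-routing/hicr9";
	unsigned int val;
	FILE *f;

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%x", &val) == 1)	/* show handler emits "0x%08x" */
		printf("hicr9 = 0x%08x\n", val);
	fclose(f);
	return 0;
}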
diff --git a/drivers/soc/aspeed/aspeed-vga-sharedmem.c b/drivers/soc/aspeed/aspeed-vga-sharedmem.c
new file mode 100644
index 000000000000..cd1f5431378c
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-vga-sharedmem.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Intel Corporation
+ * VGA Shared Memory driver for Aspeed AST2500
+ */
+
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#define SHAREDMEM_NAME "vgasharedmem"
+
+struct aspeed_vga_sharedmem {
+ struct miscdevice miscdev;
+ unsigned int addr;
+ unsigned int size;
+ bool mmap_enable;
+};
+
+static struct aspeed_vga_sharedmem *file_sharemem(struct file *file)
+{
+ return container_of(file->private_data,
+ struct aspeed_vga_sharedmem, miscdev);
+}
+
+static int vga_open(struct inode *inode, struct file *file)
+{
+ struct aspeed_vga_sharedmem *vga_sharedmem = file_sharemem(file);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!vga_sharedmem->mmap_enable)
+ return -EPERM;
+
+ return 0;
+}
+
+static int vga_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct aspeed_vga_sharedmem *vga_sharedmem = file_sharemem(file);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ vma->vm_flags &= ~VM_WRITE;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ vga_sharedmem->addr >> PAGE_SHIFT,
+ vga_sharedmem->size, vma->vm_page_prot);
+}
+
+static ssize_t enable_mmap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aspeed_vga_sharedmem *vga_sharedmem = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", vga_sharedmem->mmap_enable);
+}
+
+static ssize_t enable_mmap_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_vga_sharedmem *vga_sharedmem =
+ dev_get_drvdata(dev);
+ bool val;
+
+ if (kstrtobool(buf, &val))
+ return -EINVAL;
+
+ vga_sharedmem->mmap_enable = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(enable_mmap);
+
+static struct attribute *sharedmem_attrs[] = {
+ &dev_attr_enable_mmap.attr,
+ NULL
+};
+
+static const struct attribute_group sharedmem_attr_group = {
+ .attrs = sharedmem_attrs,
+};
+
+static const struct attribute_group *sharedmem_attr_groups[] = {
+ &sharedmem_attr_group,
+ NULL
+};
+
+static const struct file_operations vga_sharedmem_fops = {
+ .owner = THIS_MODULE,
+ .open = vga_open,
+ .mmap = vga_mmap,
+};
+
+static struct miscdevice vga_sharedmem_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = SHAREDMEM_NAME,
+ .fops = &vga_sharedmem_fops,
+ .groups = sharedmem_attr_groups,
+};
+
+static int vga_sharedmem_probe(struct platform_device *pdev)
+{
+ struct aspeed_vga_sharedmem *vga_sharedmem;
+ struct device *dev = &pdev->dev;
+ struct resource *rc;
+
+ vga_sharedmem = devm_kzalloc(dev, sizeof(*vga_sharedmem), GFP_KERNEL);
+ if (!vga_sharedmem)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, vga_sharedmem);
+
+ rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rc) {
+ dev_err(dev, "Couldn't read size device-tree property\n");
+ return -ENXIO;
+ }
+
+ vga_sharedmem->addr = rc->start;
+ vga_sharedmem->size = resource_size(rc);
+ vga_sharedmem->mmap_enable = true;
+
+ vga_sharedmem->miscdev = vga_sharedmem_miscdev;
+
+ return misc_register(&vga_sharedmem->miscdev);
+}
+
+static int vga_sharedmem_remove(struct platform_device *pdev)
+{
+ struct aspeed_vga_sharedmem *vga_sharedmem =
+ dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&vga_sharedmem->miscdev);
+
+ return 0;
+}
+
+static const struct of_device_id vga_sharedmem_match[] = {
+ { .compatible = "aspeed,ast2500-vga-sharedmem", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, vga_sharedmem_match);
+
+static struct platform_driver vga_sharedmem_driver = {
+ .driver = {
+ .name = "VGA-SHAREDMEM",
+ .of_match_table = vga_sharedmem_match,
+ },
+ .probe = vga_sharedmem_probe,
+ .remove = vga_sharedmem_remove,
+};
+
+module_platform_driver(vga_sharedmem_driver);
+
+MODULE_AUTHOR("Yang Cheng <cheng.c.yang@intel.com>");
+MODULE_DESCRIPTION("Shared VGA memory");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 477e72a1d11e..bc22f4f33dfc 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -264,6 +264,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
if (strlen(page) < len)
return -EOVERFLOW;
+ if (strlen(page) < len)
+ return -EOVERFLOW;
+
name = kstrdup(page, GFP_KERNEL);
if (!name)
return -ENOMEM;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index ca0a7d9eaa34..1fd5fdff639b 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -407,7 +407,7 @@ static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
unsigned long flags;
- if (req->status != 0) {
+ if (req->status != 0 && req->status != -ESHUTDOWN) {
ERROR(hidg->func.config->cdev,
"End Point Request ERROR: %d\n", req->status);
}
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 1e1e5c5fdafb..1a710c129efa 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -679,6 +679,8 @@ static int do_read(struct fsg_common *common)
amount_left -= nread;
common->residue -= nread;
+ fsg_stats_rd_attempt(&curlun->stats, nread);
+
/*
* Except at the end of the transfer, nread will be
* equal to the buffer size, which is divisible by the
@@ -876,6 +878,8 @@ static int do_write(struct fsg_common *common)
amount_left_to_write -= nwritten;
common->residue -= nwritten;
+ fsg_stats_wr_attempt(&curlun->stats, nwritten);
+
/* If an error occurred, report it and its position */
if (nwritten < amount) {
curlun->sense_data = SS_WRITE_ERROR;
@@ -3136,6 +3140,13 @@ static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);
+static ssize_t fsg_lun_opts_stats_show(struct config_item *item, char *page)
+{
+ return fsg_show_stats(to_fsg_lun_opts(item)->lun, page);
+}
+
+CONFIGFS_ATTR_RO(fsg_lun_opts_, stats);
+
static struct configfs_attribute *fsg_lun_attrs[] = {
&fsg_lun_opts_attr_file,
&fsg_lun_opts_attr_ro,
@@ -3143,6 +3154,7 @@ static struct configfs_attribute *fsg_lun_attrs[] = {
&fsg_lun_opts_attr_cdrom,
&fsg_lun_opts_attr_nofua,
&fsg_lun_opts_attr_inquiry_string,
+ &fsg_lun_opts_attr_stats,
NULL,
};
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index b859a158a414..197630f38fcb 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -371,6 +371,15 @@ ssize_t fsg_show_inquiry_string(struct fsg_lun *curlun, char *buf)
}
EXPORT_SYMBOL_GPL(fsg_show_inquiry_string);
+ssize_t fsg_show_stats(struct fsg_lun *curlun, char *buf)
+{
+ return sprintf(buf, "read cnt: %u\n" "read sum: %llu\n"
+ "write cnt: %u\n" "write sum: %llu\n",
+ curlun->stats.read.count, curlun->stats.read.bytes,
+ curlun->stats.write.count, curlun->stats.write.bytes);
+}
+EXPORT_SYMBOL_GPL(fsg_show_stats);
+
/*
* The caller must hold fsg->filesem for reading when calling this function.
*/
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index bdeb1e233fc9..528441480165 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -95,6 +95,32 @@ do { \
*/
#define INQUIRY_STRING_LEN ((size_t) (8 + 16 + 4 + 1))
+struct fsg_stats_cnt {
+ u64 bytes;
+ u32 count;
+};
+
+struct fsg_stats {
+ struct fsg_stats_cnt read;
+ struct fsg_stats_cnt write;
+};
+
+static inline void fsg_stats_update(struct fsg_stats_cnt *cnt, u64 diff)
+{
+ cnt->count++;
+ cnt->bytes += diff;
+}
+
+static inline void fsg_stats_wr_attempt(struct fsg_stats *stats, u64 b_written)
+{
+ fsg_stats_update(&stats->write, b_written);
+}
+
+static inline void fsg_stats_rd_attempt(struct fsg_stats *stats, u64 b_read)
+{
+ fsg_stats_update(&stats->read, b_read);
+}
+
struct fsg_lun {
struct file *filp;
loff_t file_length;
@@ -120,6 +146,8 @@ struct fsg_lun {
const char *name; /* "lun.name" */
const char **name_pfx; /* "function.name" */
char inquiry_string[INQUIRY_STRING_LEN];
+
+ struct fsg_stats stats;
};
static inline bool fsg_lun_is_open(struct fsg_lun *curlun)
@@ -208,6 +236,7 @@ ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem,
ssize_t fsg_show_inquiry_string(struct fsg_lun *curlun, char *buf);
ssize_t fsg_show_cdrom(struct fsg_lun *curlun, char *buf);
ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf);
+ssize_t fsg_show_stats(struct fsg_lun *curlun, char *buf);
ssize_t fsg_store_ro(struct fsg_lun *curlun, struct rw_semaphore *filesem,
const char *buf, size_t count);
ssize_t fsg_store_nofua(struct fsg_lun *curlun, const char *buf, size_t count);
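
The new per-LUN stats attribute is read like any other configfs file; a tiny sketch follows (the gadget name g1 and function instance mass_storage.0 in the path are illustrative, not mandated by the patch).

#include <stdio.h>

int main(void)
{
	/* Illustrative configfs path; gadget/function names depend on setup. */
	const char *path =
		"/sys/kernel/config/usb_gadget/g1/functions/mass_storage.0/lun.0/stats";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* read cnt/sum, write cnt/sum */
	fclose(f);
	return 0;
}
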
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 436571b6fc79..9cd2dc1b65c9 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -47,7 +47,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
#define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
-#define WDT_CTRL_1MHZ_CLK BIT(4)
+#define WDT_CTRL_1MHZ_CLK BIT(4) /* ast2400/2500 */
+#define WDT_CTRL_WDT_RST_BY_SOC_RST BIT(4) /* ast2600 */
#define WDT_CTRL_WDT_EXT BIT(3)
#define WDT_CTRL_WDT_INTR BIT(2)
#define WDT_CTRL_RESET_SYSTEM BIT(1)
@@ -277,12 +278,15 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
* On clock rates:
* - ast2400 wdt can run at PCLK, or 1MHz
* - ast2500 only runs at 1MHz, hard coding bit 4 to 1
- * - ast2600 always runs at 1MHz
+ * - ast2600 uses WDT0C[4] as 'Enable WDT to be reset by SOC reset'
*
* Set the ast2400 to run at 1MHz as it simplifies the driver.
*/
- if (of_device_is_compatible(np, "aspeed,ast2400-wdt"))
+ if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
+ of_device_is_compatible(np, "aspeed,ast2500-wdt"))
wdt->ctrl = WDT_CTRL_1MHZ_CLK;
+ else if (of_device_is_compatible(np, "aspeed,ast2600-wdt"))
+ wdt->ctrl = WDT_CTRL_WDT_RST_BY_SOC_RST;
/*
* Control reset on a per-device basis to ensure the
@@ -367,13 +371,12 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
}
- status = readl(wdt->base + WDT_TIMEOUT_STATUS);
- if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
- wdt->wdd.bootstatus = WDIOF_CARDRESET;
-
- if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
- of_device_is_compatible(np, "aspeed,ast2500-wdt"))
+ if (!of_device_is_compatible(np, "aspeed,ast2600-wdt")) {
+ status = readl(wdt->base + WDT_TIMEOUT_STATUS);
+ if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
+ wdt->wdd.bootstatus = WDIOF_CARDRESET;
wdt->wdd.groups = bswitch_groups;
+ }
}
dev_set_drvdata(dev, wdt);
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c
index a1bda9dab3f8..eec419719e1d 100644
--- a/fs/jffs2/writev.c
+++ b/fs/jffs2/writev.c
@@ -16,9 +16,18 @@
int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
+ int ret;
+
+ ret = mtd_writev(c->mtd, vecs, count, to, retlen);
+
if (!jffs2_is_writebuffered(c)) {
if (jffs2_sum_active()) {
int res;
+
+ if (ret ||
+ *retlen != iov_length((struct iovec *) vecs, count))
+ return ret;
+
res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to);
if (res) {
return res;
@@ -26,19 +35,23 @@ int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
}
}
- return mtd_writev(c->mtd, vecs, count, to, retlen);
+ return ret;
}
int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
size_t *retlen, const u_char *buf)
{
int ret;
+
ret = mtd_write(c->mtd, ofs, len, retlen, buf);
if (jffs2_sum_active()) {
struct kvec vecs[1];
int res;
+ if (ret || *retlen != len)
+ return ret;
+
vecs[0].iov_base = (unsigned char *) buf;
vecs[0].iov_len = len;
@@ -47,5 +60,6 @@ int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
return res;
}
}
+
return ret;
}
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 9ff4f6e4558c..41d531dd0b48 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -41,6 +41,8 @@
#define ASPEED_CLK_24M 35
#define ASPEED_CLK_MAC1RCLK 36
#define ASPEED_CLK_MAC2RCLK 37
+#define ASPEED_CLK_UART_HS 38
+#define ASPEED_CLK_MAX 39
#define ASPEED_RESET_XDMA 0
#define ASPEED_RESET_MCTP 1
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 62b9520a00fd..165bec96ebe7 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -87,15 +87,28 @@
#define ASPEED_CLK_MAC2RCLK 68
#define ASPEED_CLK_MAC3RCLK 69
#define ASPEED_CLK_MAC4RCLK 70
+#define ASPEED_CLK_UART5 71
+#define ASPEED_CLK_I3C 72
+#define ASPEED_CLK_MAX 73
/* Only list resets here that are not part of a gate */
+#define ASPEED_RESET_ESPI 57
#define ASPEED_RESET_ADC 55
#define ASPEED_RESET_JTAG_MASTER2 54
+#define ASPEED_RESET_I3C7 47
+#define ASPEED_RESET_I3C6 46
+#define ASPEED_RESET_I3C5 45
+#define ASPEED_RESET_I3C4 44
+#define ASPEED_RESET_I3C3 43
+#define ASPEED_RESET_I3C2 42
+#define ASPEED_RESET_I3C1 41
+#define ASPEED_RESET_I3C0 40
#define ASPEED_RESET_I3C_DMA 39
#define ASPEED_RESET_PWM 37
#define ASPEED_RESET_PECI 36
#define ASPEED_RESET_MII 35
#define ASPEED_RESET_I2C 34
+#define ASPEED_RESET_LPC 32
#define ASPEED_RESET_H2X 31
#define ASPEED_RESET_GP_MCU 30
#define ASPEED_RESET_DP_MCU 29
diff --git a/include/linux/aspeed-mctp.h b/include/linux/aspeed-mctp.h
new file mode 100644
index 000000000000..d286060a79ee
--- /dev/null
+++ b/include/linux/aspeed-mctp.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Intel Corporation */
+
+#ifndef __LINUX_ASPEED_MCTP_H
+#define __LINUX_ASPEED_MCTP_H
+
+#include <linux/types.h>
+
+struct mctp_client;
+struct aspeed_mctp;
+
+struct pcie_transport_hdr {
+ u8 fmt_type;
+ u8 mbz;
+ u8 mbz_attr_len_hi;
+ u8 len_lo;
+ u16 requester;
+ u8 tag;
+ u8 code;
+ u16 target;
+ u16 vendor;
+} __packed;
+
+struct mctp_protocol_hdr {
+ u8 ver;
+ u8 dest;
+ u8 src;
+ u8 flags_seq_tag;
+} __packed;
+
+#define PCIE_VDM_HDR_SIZE 16
+#define MCTP_BTU_SIZE 64
+#define PCIE_VDM_DATA_SIZE_DW (MCTP_BTU_SIZE / 4)
+#define PCIE_VDM_HDR_SIZE_DW (PCIE_VDM_HDR_SIZE / 4)
+
+#define PCIE_MCTP_MIN_PACKET_SIZE (PCIE_VDM_HDR_SIZE + 4)
+
+struct mctp_pcie_packet_data {
+ u32 hdr[PCIE_VDM_HDR_SIZE_DW];
+ u32 payload[PCIE_VDM_DATA_SIZE_DW];
+};
+
+struct mctp_pcie_packet {
+ struct mctp_pcie_packet_data data;
+ u32 size;
+};
+
+/**
+ * aspeed_mctp_add_type_handler() - register for the given MCTP message type
+ * @client: pointer to the existing mctp_client context
+ * @mctp_type: message type code according to DMTF DSP0239 spec.
+ * @pci_vendor_id: vendor ID (non-zero if msg_type is Vendor Defined PCI,
+ * otherwise it should be set to 0)
+ * @vdm_type: vendor defined message type (it should be set to 0 for non-Vendor
+ * Defined PCI message type)
+ * @vdm_mask: vendor defined message mask (it should be set to 0 for non-Vendor
+ * Defined PCI message type)
+ *
+ * Return:
+ * * 0 - success,
+ * * -EINVAL - arguments passed are incorrect,
+ * * -ENOMEM - cannot alloc a new handler,
+ * * -EBUSY - given message has already registered handler.
+ */
+
+int aspeed_mctp_add_type_handler(struct mctp_client *client, u8 mctp_type,
+ u16 pci_vendor_id, u16 vdm_type, u16 vdm_mask);
+
+/**
+ * aspeed_mctp_create_client() - create mctp_client context
+ * @priv pointer to aspeed-mctp context
+ *
+ * Returns struct mctp_client or NULL.
+ */
+struct mctp_client *aspeed_mctp_create_client(struct aspeed_mctp *priv);
+
+/**
+ * aspeed_mctp_delete_client()- delete mctp_client context
+ * @client: pointer to existing mctp_client context
+ */
+void aspeed_mctp_delete_client(struct mctp_client *client);
+
+/**
+ * aspeed_mctp_send_packet() - send mctp_packet
+ * @client: pointer to existing mctp_client context
+ * @tx_packet: the allocated packet that needs to be sent via aspeed-mctp
+ *
+ * After the function returns success, the packet is no longer owned by the
+ * caller, and as such, the caller should not attempt to free it.
+ *
+ * Return:
+ * * 0 - success,
+ * * -ENOSPC - failed to send packet due to lack of available space.
+ */
+int aspeed_mctp_send_packet(struct mctp_client *client,
+ struct mctp_pcie_packet *tx_packet);
+
+/**
+ * aspeed_mctp_receive_packet() - receive mctp_packet
+ * @client: pointer to existing mctp_client context
+ * @timeout: timeout, in jiffies
+ *
+ * The function will sleep for up to @timeout if no packet is ready to read.
+ *
+ * After the function returns a valid packet, the caller takes ownership of it
+ * and is responsible for freeing it.
+ *
+ * Returns a struct mctp_pcie_packet, or ERR_PTR() in case of error or when the
+ * @timeout elapses.
+ */
+struct mctp_pcie_packet *aspeed_mctp_receive_packet(struct mctp_client *client,
+ unsigned long timeout);
+
+/**
+ * aspeed_mctp_flush_rx_queue() - remove all mctp_packets from rx queue
+ * @client: pointer to existing mctp_client context
+ */
+void aspeed_mctp_flush_rx_queue(struct mctp_client *client);
+
+/**
+ * aspeed_mctp_get_eid_bdf() - return PCIe address for requested endpoint ID
+ * @client: pointer to existing mctp_client context
+ * @eid: requested eid
+ * @bdf: pointer to store BDF value
+ *
+ * Return:
+ * * 0 - success,
+ * * -ENOENT - there is no record for requested endpoint id.
+ */
+int aspeed_mctp_get_eid_bdf(struct mctp_client *client, u8 eid, u16 *bdf);
+
+/**
+ * aspeed_mctp_get_eid() - return EID for requested BDF and domainId.
+ * @client: pointer to existing mctp_client context
+ * @bdf: requested BDF value
+ * @domain_id: requested domainId
+ * @eid: pointer to store EID value
+ *
+ * Return:
+ * * 0 - success,
+ * * -ENOENT - there is no record for requested bdf/domainId.
+ */
+int aspeed_mctp_get_eid(struct mctp_client *client, u16 bdf,
+ u8 domain_id, u8 *eid);
+
+void *aspeed_mctp_packet_alloc(gfp_t flags);
+void aspeed_mctp_packet_free(void *packet);
+
+#endif /* __LINUX_ASPEED_MCTP_H */
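
A hedged kernel-side sketch of the client API declared above, showing the create/register/receive/send/delete flow. The MCTP type value 0x00 and the error handling are illustrative only; the ownership comments follow the kernel-doc above.

/* Sketch only: 'priv' is assumed to come from the aspeed-mctp bus driver. */
#include <linux/aspeed-mctp.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>

static int example_mctp_echo(struct aspeed_mctp *priv)
{
	struct mctp_client *client;
	struct mctp_pcie_packet *rx, *tx;
	int ret;

	client = aspeed_mctp_create_client(priv);
	if (!client)
		return -ENOMEM;

	/* Receive MCTP control messages (type 0x00, not vendor defined PCI). */
	ret = aspeed_mctp_add_type_handler(client, 0x00, 0, 0, 0);
	if (ret)
		goto out_delete;

	/* Sleep up to one second for an incoming packet. */
	rx = aspeed_mctp_receive_packet(client, msecs_to_jiffies(1000));
	if (IS_ERR(rx)) {
		ret = PTR_ERR(rx);
		goto out_delete;
	}

	tx = aspeed_mctp_packet_alloc(GFP_KERNEL);
	if (!tx) {
		ret = -ENOMEM;
		goto out_free_rx;
	}

	tx->data = rx->data;	/* echo the VDM header + payload back */
	tx->size = rx->size;

	ret = aspeed_mctp_send_packet(client, tx);
	if (ret)
		aspeed_mctp_packet_free(tx);	/* assumed still ours on failure */

out_free_rx:
	aspeed_mctp_packet_free(rx);	/* rx is owned by the caller per the doc */
out_delete:
	aspeed_mctp_delete_client(client);
	return ret;
}
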
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index 98ef73b7c8fd..3d2586062ccc 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -14,6 +14,7 @@
#ifdef __KERNEL__
#include <linux/bitops.h>
+#include <linux/workqueue.h>
struct i2c_mux_core {
struct i2c_adapter *parent;
@@ -27,6 +28,10 @@ struct i2c_mux_core {
int (*select)(struct i2c_mux_core *, u32 chan_id);
int (*deselect)(struct i2c_mux_core *, u32 chan_id);
+ struct mutex hold_lock; /* mutex for channel holding */
+ u32 holder_chan_id;
+ struct delayed_work unhold_work;
+
int num_adapters;
int max_adapters;
struct i2c_adapter *adapter[];
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2ce3efbe9198..fa0af97af147 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -20,6 +20,7 @@
#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
#include <linux/swab.h> /* for swab16 */
+#include <linux/workqueue.h>
#include <uapi/linux/i2c.h>
extern struct bus_type i2c_bus_type;
@@ -373,6 +374,7 @@ enum i2c_slave_event {
I2C_SLAVE_WRITE_REQUESTED,
I2C_SLAVE_READ_PROCESSED,
I2C_SLAVE_WRITE_RECEIVED,
+ I2C_SLAVE_GCALL_REQUESTED,
I2C_SLAVE_STOP,
};
@@ -738,6 +740,13 @@ struct i2c_adapter {
struct irq_domain *host_notify_domain;
struct regulator *bus_regulator;
+
+ /*
+ * These will be used by root adapters only. For muxes, each mux core
+ * has these individually.
+ */
+ struct mutex hold_lock; /* mutex for bus holding */
+ struct delayed_work unhold_work;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -1040,4 +1049,22 @@ static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle ha
}
#endif /* CONFIG_ACPI */
+enum i2c_hold_msg_type {
+ I2C_HOLD_MSG_NONE,
+ I2C_HOLD_MSG_SET,
+ I2C_HOLD_MSG_RESET
+};
+
+static inline enum i2c_hold_msg_type i2c_check_hold_msg(u16 flags, u16 len, u16 *buf)
+{
+ if (flags & I2C_M_HOLD && len == sizeof(u16)) {
+ if (*buf)
+ return I2C_HOLD_MSG_SET;
+
+ return I2C_HOLD_MSG_RESET;
+ }
+
+ return I2C_HOLD_MSG_NONE;
+}
+
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
index 73b0982cc519..a7a19ebe6b6d 100644
--- a/include/linux/i3c/ccc.h
+++ b/include/linux/i3c/ccc.h
@@ -32,6 +32,9 @@
#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
+#define I3C_CCC_SETAASA I3C_CCC_ID(0x29, true)
+#define I3C_CCC_SETHID I3C_CCC_ID(0x61, true)
+#define I3C_CCC_DEVCTRL I3C_CCC_ID(0x62, true)
/* Unicast-only commands */
#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
@@ -243,6 +246,15 @@ struct i3c_ccc_setbrgtgt {
struct i3c_ccc_bridged_slave_desc bslaves[0];
} __packed;
+
+/**
+ * struct i3c_ccc_sethid - payload passed to SETHID CCC
+ *
+ * @hid: 3-bit HID
+ */
+struct i3c_ccc_sethid {
+ u8 hid;
+};
/**
* enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
*/
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 8242e13e7b0b..e036a30f8c7e 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -71,9 +71,11 @@ struct i3c_priv_xfer {
/**
* enum i3c_dcr - I3C DCR values
* @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ * @I3C_DCR_HUB: I3C HUB device
*/
enum i3c_dcr {
I3C_DCR_GENERIC_DEVICE = 0,
+ I3C_DCR_HUB = 0xC2,
};
#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
@@ -128,6 +130,7 @@ struct i3c_device_info {
u32 max_read_turnaround;
u16 max_read_len;
u16 max_write_len;
+ __be16 status;
};
/*
@@ -178,6 +181,7 @@ struct i3c_driver {
int (*probe)(struct i3c_device *dev);
void (*remove)(struct i3c_device *dev);
const struct i3c_device_id *id_table;
+ bool target;
};
static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
@@ -293,6 +297,8 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev,
struct i3c_priv_xfer *xfers,
int nxfers);
+int i3c_device_generate_ibi(struct i3c_device *dev, const u8 *data, int len);
+
void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
struct i3c_ibi_payload {
@@ -332,4 +338,12 @@ void i3c_device_free_ibi(struct i3c_device *dev);
int i3c_device_enable_ibi(struct i3c_device *dev);
int i3c_device_disable_ibi(struct i3c_device *dev);
+int i3c_device_getstatus_ccc(struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_target_read_setup {
+ void (*handler)(struct i3c_device *dev, const u8 *data, size_t len);
+};
+
+int i3c_target_read_register(struct i3c_device *dev, const struct i3c_target_read_setup *setup);
+
#endif /* I3C_DEV_H */
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 9cb39d901cd5..5d35a6d1c992 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -22,6 +22,7 @@
#define I3C_BROADCAST_ADDR 0x7e
#define I3C_MAX_ADDR GENMASK(6, 0)
+struct i3c_target_ops;
struct i3c_master_controller;
struct i3c_bus;
struct i2c_device;
@@ -185,10 +186,20 @@ struct i3c_dev_boardinfo {
};
/**
+ * struct i3c_target_info - target information attached to a specific device
+ * @read_handler: handler specified at i3c_target_read_register() call time.
+ */
+
+struct i3c_target_info {
+ void (*read_handler)(struct i3c_device *dev, const u8 *data, size_t len);
+};
+
+/**
* struct i3c_dev_desc - I3C device descriptor
* @common: common part of the I3C device descriptor
* @info: I3C device information. Will be automatically filled when you create
* your device with i3c_master_add_i3c_dev_locked()
+ * @target_info: I3C target information.
* @ibi_lock: lock used to protect the &struct_i3c_device->ibi
* @ibi: IBI info attached to a device. Should be NULL until
* i3c_device_request_ibi() is called
@@ -207,6 +218,7 @@ struct i3c_dev_boardinfo {
struct i3c_dev_desc {
struct i3c_i2c_dev_desc common;
struct i3c_device_info info;
+ struct i3c_target_info target_info;
struct mutex ibi_lock;
struct i3c_device_ibi_info *ibi;
struct i3c_device *dev;
@@ -463,6 +475,8 @@ struct i3c_master_controller_ops {
* registered to the I2C subsystem to be as transparent as possible to
* existing I2C drivers
* @ops: master operations. See &struct i3c_master_controller_ops
+ * @target_ops: target operations. See &struct i3c_target_ops
+ * @target: true if the underlying I3C device acts as a target on I3C bus
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
* @boardinfo.i3c: list of I3C boardinfo objects
@@ -485,8 +499,11 @@ struct i3c_master_controller {
struct i3c_dev_desc *this;
struct i2c_adapter i2c;
const struct i3c_master_controller_ops *ops;
+ const struct i3c_target_ops *target_ops;
+ unsigned int target : 1;
unsigned int secondary : 1;
unsigned int init_done : 1;
+ unsigned int jdec_spd : 1;
struct {
struct list_head i3c;
struct list_head i2c;
@@ -544,6 +561,13 @@ int i3c_master_register(struct i3c_master_controller *master,
bool secondary);
int i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *master_ops,
+ const struct i3c_target_ops *target_ops,
+ bool secondary);
+int i3c_unregister(struct i3c_master_controller *master);
+
/**
* i3c_dev_get_master_data() - get master private data attached to an I3C
* device descriptor
diff --git a/include/linux/i3c/target.h b/include/linux/i3c/target.h
new file mode 100644
index 000000000000..9e71124b5325
--- /dev/null
+++ b/include/linux/i3c/target.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022, Intel Corporation */
+
+#ifndef I3C_TARGET_H
+#define I3C_TARGET_H
+
+#include <linux/device.h>
+#include <linux/i3c/device.h>
+
+struct i3c_master_controller;
+
+struct i3c_target_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*priv_xfers)(struct i3c_dev_desc *dev, struct i3c_priv_xfer *xfers, int nxfers);
+ int (*generate_ibi)(struct i3c_dev_desc *dev, const u8 *data, int len);
+};
+
+int i3c_target_register(struct i3c_master_controller *master, struct device *parent,
+ const struct i3c_target_ops *ops);
+int i3c_target_unregister(struct i3c_master_controller *master);
+
+#endif
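
A minimal sketch of how a controller driver could plug into this target interface; all callback bodies are placeholders, and the probe code that creates 'master' is omitted.

#include <linux/i3c/master.h>
#include <linux/i3c/target.h>

static int example_target_bus_init(struct i3c_master_controller *m)
{
	return 0;	/* placeholder: program the controller into target mode */
}

static void example_target_bus_cleanup(struct i3c_master_controller *m)
{
}

static int example_target_priv_xfers(struct i3c_dev_desc *dev,
				     struct i3c_priv_xfer *xfers, int nxfers)
{
	return 0;	/* placeholder: queue TX data / fetch RX data */
}

static int example_target_generate_ibi(struct i3c_dev_desc *dev,
				       const u8 *data, int len)
{
	return 0;	/* placeholder: raise an in-band interrupt */
}

static const struct i3c_target_ops example_target_ops = {
	.bus_init = example_target_bus_init,
	.bus_cleanup = example_target_bus_cleanup,
	.priv_xfers = example_target_priv_xfers,
	.generate_ibi = example_target_generate_ibi,
};

/* Called from the controller's probe(), with 'master' and 'dev' set up there. */
static int example_register(struct i3c_master_controller *master,
			    struct device *dev)
{
	return i3c_target_register(master, dev, &example_target_ops);
}
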
diff --git a/include/linux/jtag.h b/include/linux/jtag.h
new file mode 100644
index 000000000000..fab12dc4fc5e
--- /dev/null
+++ b/include/linux/jtag.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved. */
+/* Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com> */
+/* Copyright (c) 2019 Intel Corporation */
+
+#ifndef __LINUX_JTAG_H
+#define __LINUX_JTAG_H
+
+#include <linux/types.h>
+#include <uapi/linux/jtag.h>
+
+#define JTAG_MAX_XFER_DATA_LEN 65535
+
+struct jtag;
+/**
+ * struct jtag_ops - callbacks for JTAG control functions:
+ *
+ * @freq_get: get frequency function. Filled by dev driver
+ * @freq_set: set frequency function. Filled by dev driver
+ * @status_get: get JTAG TAPC state function. Mandatory, Filled by dev driver
+ * @status_set: set JTAG TAPC state function. Mandatory, Filled by dev driver
+ * @xfer: send JTAG xfer function. Mandatory func. Filled by dev driver
+ * @mode_set: set specific work mode for JTAG. Filled by dev driver
+ * @bitbang: set low level bitbang operations. Filled by dev driver
+ * @enable: enables JTAG interface in master mode. Filled by dev driver
+ * @disable: disables JTAG interface master mode. Filled by dev driver
+ */
+struct jtag_ops {
+ int (*freq_get)(struct jtag *jtag, u32 *freq);
+ int (*freq_set)(struct jtag *jtag, u32 freq);
+ int (*status_get)(struct jtag *jtag, u32 *state);
+ int (*status_set)(struct jtag *jtag, struct jtag_tap_state *endst);
+ int (*xfer)(struct jtag *jtag, struct jtag_xfer *xfer, u8 *xfer_data);
+ int (*mode_set)(struct jtag *jtag, struct jtag_mode *jtag_mode);
+ int (*bitbang)(struct jtag *jtag, struct bitbang_packet *bitbang,
+ struct tck_bitbang *bitbang_data);
+ int (*enable)(struct jtag *jtag);
+ int (*disable)(struct jtag *jtag);
+};
+
+void *jtag_priv(struct jtag *jtag);
+int devm_jtag_register(struct device *dev, struct jtag *jtag);
+struct jtag *jtag_alloc(struct device *host, size_t priv_size,
+ const struct jtag_ops *ops);
+void jtag_free(struct jtag *jtag);
+
+#endif /* __LINUX_JTAG_H */
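
A hedged sketch of the registration flow this header implies for a JTAG controller driver; the ops bodies are stubs and example_jtag_priv is only illustrative.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jtag.h>
#include <linux/platform_device.h>

struct example_jtag_priv {
	void __iomem *base;	/* illustrative controller state */
};

static int example_freq_get(struct jtag *jtag, u32 *freq)
{
	*freq = 1000000;	/* stub: report a fixed 1 MHz TCK */
	return 0;
}

static int example_status_get(struct jtag *jtag, u32 *state)
{
	*state = JTAG_STATE_IDLE;	/* stub: report the current TAP state */
	return 0;
}

static int example_status_set(struct jtag *jtag, struct jtag_tap_state *endst)
{
	return 0;	/* stub: walk the TAP state machine to endst->endstate */
}

static int example_xfer(struct jtag *jtag, struct jtag_xfer *xfer, u8 *xfer_data)
{
	return 0;	/* stub: shift xfer->length bits through TDI/TDO */
}

static const struct jtag_ops example_jtag_ops = {
	.freq_get = example_freq_get,
	.status_get = example_status_get,	/* mandatory per the doc above */
	.status_set = example_status_set,	/* mandatory per the doc above */
	.xfer = example_xfer,			/* mandatory per the doc above */
};

static int example_jtag_probe(struct platform_device *pdev)
{
	struct jtag *jtag;

	jtag = jtag_alloc(&pdev->dev, sizeof(struct example_jtag_priv),
			  &example_jtag_ops);
	if (!jtag)
		return -ENOMEM;

	/* jtag_priv(jtag) returns the example_jtag_priv area for the driver. */
	return devm_jtag_register(&pdev->dev, jtag);
}
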
diff --git a/include/linux/mfd/intel-peci-client.h b/include/linux/mfd/intel-peci-client.h
new file mode 100644
index 000000000000..84b5882bcf31
--- /dev/null
+++ b/include/linux/mfd/intel-peci-client.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2020 Intel Corporation */
+
+#ifndef __LINUX_MFD_INTEL_PECI_CLIENT_H
+#define __LINUX_MFD_INTEL_PECI_CLIENT_H
+
+#include <linux/peci.h>
+
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#else
+/*
+ * Architectures other than x86 cannot include that header file, so define these
+ * here. They are needed for detecting the type of the client x86 CPUs behind a
+ * PECI connection.
+ */
+#define INTEL_FAM6_HASWELL_X 0x3F
+#define INTEL_FAM6_BROADWELL_X 0x4F
+#define INTEL_FAM6_SKYLAKE_X 0x55
+#define INTEL_FAM6_SKYLAKE_XD 0x56
+#define INTEL_FAM6_ICELAKE_X 0x6A
+#define INTEL_FAM6_ICELAKE_XD 0x6C
+#endif
+
+#define INTEL_FAM6 6 /* P6 (Pentium Pro and later) */
+
+#define CORE_MASK_BITS_ON_HSX 18
+#define CHAN_RANK_MAX_ON_HSX 8 /* Max number of channel ranks on Haswell */
+#define DIMM_IDX_MAX_ON_HSX 3 /* Max DIMM index per channel on Haswell */
+
+#define CORE_MASK_BITS_ON_BDX 24
+#define CHAN_RANK_MAX_ON_BDX 4 /* Max number of channel ranks on Broadwell */
+#define DIMM_IDX_MAX_ON_BDX 3 /* Max DIMM index per channel on Broadwell */
+
+#define CORE_MASK_BITS_ON_SKX 28
+#define CHAN_RANK_MAX_ON_SKX 6 /* Max number of channel ranks on Skylake */
+#define DIMM_IDX_MAX_ON_SKX 2 /* Max DIMM index per channel on Skylake */
+
+#define CORE_MASK_BITS_ON_SKXD 28
+#define CHAN_RANK_MAX_ON_SKXD 2 /* Max number of channel ranks on Skylake D */
+#define DIMM_IDX_MAX_ON_SKXD 2 /* Max DIMM index per channel on Skylake D */
+
+#define CORE_MASK_BITS_ON_ICX 64
+#define CHAN_RANK_MAX_ON_ICX 8 /* Max number of channel ranks on Icelake */
+#define DIMM_IDX_MAX_ON_ICX 2 /* Max DIMM index per channel on Icelake */
+
+#define CORE_MASK_BITS_ON_ICXD 64
+#define CHAN_RANK_MAX_ON_ICXD 4 /* Max number of channel ranks on Icelake D */
+#define DIMM_IDX_MAX_ON_ICXD 2 /* Max DIMM index per channel on Icelake D */
+
+#define CORE_MASK_BITS_MAX CORE_MASK_BITS_ON_ICX
+#define CHAN_RANK_MAX CHAN_RANK_MAX_ON_HSX
+#define DIMM_IDX_MAX DIMM_IDX_MAX_ON_HSX
+#define DIMM_NUMS_MAX (CHAN_RANK_MAX * DIMM_IDX_MAX)
+
+/**
+ * struct cpu_gen_info - CPU generation specific information
+ * @family: CPU family ID
+ * @model: CPU model
+ * @core_mask_bits: number of resolved core bits
+ * @chan_rank_max: max number of channel ranks
+ * @dimm_idx_max: max number of DIMM indices
+ *
+ * CPU generation specific information to identify maximum number of cores and
+ * DIMM slots.
+ */
+struct cpu_gen_info {
+ u16 family;
+ u8 model;
+ uint core_mask_bits;
+ uint chan_rank_max;
+ uint dimm_idx_max;
+};
+
+/**
+ * struct peci_client_manager - PECI client manager information
+ * @client: pointer to the PECI client
+ * @name: PECI client manager name
+ * @gen_info: CPU generation info of the detected CPU
+ *
+ * PECI client manager information for managing PECI sideband functions on a CPU
+ * client.
+ */
+struct peci_client_manager {
+ struct peci_client *client;
+ char name[PECI_NAME_SIZE];
+ const struct cpu_gen_info *gen_info;
+};
+
+/**
+ * peci_client_read_package_config - read from the Package Configuration Space
+ * @priv: driver private data structure
+ * @index: encoding index for the requested service
+ * @param: parameter to specify the exact data being requested
+ * @data: data buffer to store the result
+ * Context: can sleep
+ *
+ * A generic PECI command that provides read access to the
+ * "Package Configuration Space" that is maintained by the PCU, including
+ * various power and thermal management functions. Typical PCS read services
+ * supported by the processor may include access to temperature data, energy
+ * status, run time information, DIMM temperatures and so on.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+peci_client_read_package_config(struct peci_client_manager *priv,
+ u8 index, u16 param, u8 *data)
+{
+ struct peci_rd_pkg_cfg_msg msg;
+ int ret;
+
+ msg.addr = priv->client->addr;
+ msg.index = index;
+ msg.param = param;
+ msg.rx_len = 4;
+ msg.domain_id = 0;
+
+ ret = peci_command(priv->client->adapter, PECI_CMD_RD_PKG_CFG, sizeof(msg), &msg);
+ if (msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ if (ret)
+ return ret;
+
+ memcpy(data, msg.pkg_config, 4);
+
+ return 0;
+}
+
+/**
+ * peci_client_write_package_config - write to the Package Configuration Space
+ * @priv: driver private data structure
+ * @index: encoding index for the requested service
+ * @param: parameter to specify the exact data being requested
+ * @data: data to write
+ * Context: can sleep
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+peci_client_write_package_config(struct peci_client_manager *priv,
+ u8 index, u16 param, u32 data)
+{
+ struct peci_wr_pkg_cfg_msg msg;
+ int ret;
+
+ msg.addr = priv->client->addr;
+ msg.index = index;
+ msg.param = param;
+ msg.tx_len = 4u;
+ msg.value = data;
+ msg.domain_id = 0;
+
+ ret = peci_command(priv->client->adapter, PECI_CMD_WR_PKG_CFG, sizeof(msg), &msg);
+ if (!ret) {
+ if (msg.cc != PECI_DEV_CC_SUCCESS)
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+#endif /* __LINUX_MFD_INTEL_PECI_CLIENT_H */
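
For illustration, a short sketch of a sub-device driver using peci_client_read_package_config() above. The index/parameter values are placeholders rather than real PCS encodings, and little-endian byte order of pkg_config is an assumption.

#include <linux/mfd/intel-peci-client.h>

/* Sketch: read one 32-bit Package Configuration Space entry. */
static int example_read_pcs(struct peci_client_manager *mgr, u32 *out)
{
	u8 data[4];
	int ret;

	ret = peci_client_read_package_config(mgr, 0 /* index */,
					      0 /* param */, data);
	if (ret)
		return ret;

	/* Assumes little-endian ordering of the returned pkg_config bytes. */
	*out = (u32)data[0] | (u32)data[1] << 8 |
	       (u32)data[2] << 16 | (u32)data[3] << 24;
	return 0;
}
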
diff --git a/include/linux/peci.h b/include/linux/peci.h
new file mode 100644
index 000000000000..5a707bbeb66d
--- /dev/null
+++ b/include/linux/peci.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2019 Intel Corporation */
+
+#ifndef __LINUX_PECI_H
+#define __LINUX_PECI_H
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/peci-ioctl.h>
+
+#define PECI_NAME_SIZE 32
+
+struct peci_board_info {
+ char type[PECI_NAME_SIZE];
+ u8 addr; /* CPU client address */
+ struct device_node *of_node;
+};
+
+/**
+ * struct peci_adapter - represent a PECI adapter
+ * @owner: owner module of the PECI adapter
+ * @bus_lock: mutex for exclusion of multiple callers
+ * @dev: device interface to this driver
+ * @nr: the bus number to map
+ * @name: name of the adapter
+ * @userspace_clients_lock: mutex for exclusion of clients handling
+ * @userspace_clients: list of registered clients
+ * @xfer: low-level transfer function pointer of the adapter
+ * @cmd_mask: mask for supportable PECI commands
+ * @use_dma: flag for indicating that adapter uses DMA
+ *
+ * Each PECI adapter can communicate with one or more PECI client children.
+ * These make a small bus, sharing a single wired PECI connection.
+ */
+struct peci_adapter {
+ struct module *owner;
+ struct mutex bus_lock; /* mutex for bus locking */
+ struct device dev;
+ int nr;
+ char name[PECI_NAME_SIZE];
+ struct mutex userspace_clients_lock; /* clients list mutex */
+ struct list_head userspace_clients;
+ int (*xfer)(struct peci_adapter *adapter,
+ struct peci_xfer_msg *msg);
+ u32 cmd_mask;
+ bool use_dma;
+ u8 peci_revision;
+};
+
+static inline struct peci_adapter *to_peci_adapter(void *d)
+{
+ return container_of(d, struct peci_adapter, dev);
+}
+
+static inline void *peci_get_adapdata(const struct peci_adapter *adapter)
+{
+ return dev_get_drvdata(&adapter->dev);
+}
+
+static inline void peci_set_adapdata(struct peci_adapter *adapter, void *data)
+{
+ dev_set_drvdata(&adapter->dev, data);
+}
+
+/**
+ * struct peci_client - represent a PECI client device
+ * @dev: driver model device node for the client
+ * @adapter: manages the bus segment hosting this PECI device
+ * @addr: address used on the PECI bus connected to the parent adapter
+ * @name: indicates the type of the device
+ * @detected: detected PECI clients list
+ *
+ * A peci_client identifies a single device (i.e. CPU) connected to a peci bus.
+ * The behaviour exposed to Linux is defined by the driver managing the device.
+ */
+struct peci_client {
+ struct device dev;
+ struct peci_adapter *adapter;
+ u8 addr;
+ char name[PECI_NAME_SIZE];
+ struct list_head detected;
+};
+
+static inline struct peci_client *to_peci_client(void *d)
+{
+ return container_of(d, struct peci_client, dev);
+}
+
+struct peci_device_id {
+ char name[PECI_NAME_SIZE];
+ ulong driver_data; /* Data private to the driver */
+};
+
+/**
+ * struct peci_driver - represent a PECI device driver
+ * @probe: callback for device binding
+ * @remove: callback for device unbinding
+ * @shutdown: callback for device shutdown
+ * @driver: device driver model driver
+ * @id_table: list of PECI devices supported by this driver
+ *
+ * The driver.owner field should be set to the module owner of this driver.
+ * The driver.name field should be set to the name of this driver.
+ */
+struct peci_driver {
+ int (*probe)(struct peci_client *client);
+ int (*remove)(struct peci_client *client);
+ void (*shutdown)(struct peci_client *client);
+ struct device_driver driver;
+ const struct peci_device_id *id_table;
+};
+
+static inline struct peci_driver *to_peci_driver(void *d)
+{
+ return container_of(d, struct peci_driver, driver);
+}
+
+/**
+ * module_peci_driver - Helper macro for registering a modular PECI driver
+ * @__peci_driver: peci_driver struct
+ *
+ * Helper macro for PECI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_peci_driver(__peci_driver) \
+ module_driver(__peci_driver, peci_add_driver, peci_del_driver)
+
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define peci_add_driver(driver) peci_register_driver(THIS_MODULE, driver)
+
+extern struct bus_type peci_bus_type;
+extern struct device_type peci_adapter_type;
+extern struct device_type peci_client_type;
+
+int peci_register_driver(struct module *owner, struct peci_driver *drv);
+void peci_del_driver(struct peci_driver *driver);
+struct peci_client *peci_verify_client(struct device *dev);
+struct peci_adapter *peci_alloc_adapter(struct device *dev, uint size);
+struct peci_adapter *peci_get_adapter(int nr);
+void peci_put_adapter(struct peci_adapter *adapter);
+int peci_add_adapter(struct peci_adapter *adapter);
+void peci_del_adapter(struct peci_adapter *adapter);
+struct peci_adapter *peci_verify_adapter(struct device *dev);
+int peci_for_each_dev(void *data, int (*fn)(struct device *, void *));
+struct peci_xfer_msg *peci_get_xfer_msg(u8 tx_len, u8 rx_len);
+void peci_put_xfer_msg(struct peci_xfer_msg *msg);
+int peci_command(struct peci_adapter *adapter, enum peci_cmd cmd, uint msg_len, void *vmsg);
+int peci_get_cpu_id(struct peci_adapter *adapter, u8 addr, u32 *cpu_id);
+
+#endif /* __LINUX_PECI_H */
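
A minimal sketch of a PECI client driver built on this header; the device name in the id table is illustrative.

#include <linux/module.h>
#include <linux/peci.h>

static int example_peci_probe(struct peci_client *client)
{
	dev_info(&client->dev, "PECI client at addr 0x%02x bound\n",
		 client->addr);
	return 0;
}

static int example_peci_remove(struct peci_client *client)
{
	return 0;
}

static const struct peci_device_id example_peci_ids[] = {
	{ .name = "example-peci-client" },	/* illustrative device name */
	{ }
};

static struct peci_driver example_peci_driver = {
	.probe = example_peci_probe,
	.remove = example_peci_remove,
	.id_table = example_peci_ids,
	.driver = {
		.name = "example-peci-client",
	},
};
module_peci_driver(example_peci_driver);

MODULE_LICENSE("GPL v2");
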
diff --git a/include/uapi/linux/aspeed-espi-ioc.h b/include/uapi/linux/aspeed-espi-ioc.h
new file mode 100644
index 000000000000..8e198158f6da
--- /dev/null
+++ b/include/uapi/linux/aspeed-espi-ioc.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 Aspeed Technology Inc.
+ */
+#ifndef _ASPEED_ESPI_IOC_H
+#define _ASPEED_ESPI_IOC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * eSPI cycle type encoding
+ *
+ * Section 5.1 Cycle Types and Packet Format,
+ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016.
+ */
+#define ESPI_PERIF_MEMRD32 0x00
+#define ESPI_PERIF_MEMRD64 0x02
+#define ESPI_PERIF_MEMWR32 0x01
+#define ESPI_PERIF_MEMWR64 0x03
+#define ESPI_PERIF_MSG 0x10
+#define ESPI_PERIF_MSG_D 0x11
+#define ESPI_PERIF_SUC_CMPLT 0x06
+#define ESPI_PERIF_SUC_CMPLT_D_MIDDLE 0x09
+#define ESPI_PERIF_SUC_CMPLT_D_FIRST 0x0b
+#define ESPI_PERIF_SUC_CMPLT_D_LAST 0x0d
+#define ESPI_PERIF_SUC_CMPLT_D_ONLY 0x0f
+#define ESPI_PERIF_UNSUC_CMPLT 0x0c
+#define ESPI_OOB_MSG 0x21
+#define ESPI_FLASH_READ 0x00
+#define ESPI_FLASH_WRITE 0x01
+#define ESPI_FLASH_ERASE 0x02
+#define ESPI_FLASH_SUC_CMPLT 0x06
+#define ESPI_FLASH_SUC_CMPLT_D_MIDDLE 0x09
+#define ESPI_FLASH_SUC_CMPLT_D_FIRST 0x0b
+#define ESPI_FLASH_SUC_CMPLT_D_LAST 0x0d
+#define ESPI_FLASH_SUC_CMPLT_D_ONLY 0x0f
+#define ESPI_FLASH_UNSUC_CMPLT 0x0c
+
+/*
+ * eSPI packet format structure
+ *
+ * Section 5.1 Cycle Types and Packet Format,
+ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016.
+ */
+struct espi_comm_hdr {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+} __packed;
+
+struct espi_perif_mem32 {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+ u32 addr_be;
+ u8 data[];
+} __packed;
+
+struct espi_perif_mem64 {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+ u32 addr_be;
+ u8 data[];
+} __packed;
+
+struct espi_perif_msg {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+ u8 msg_code;
+ u8 msg_byte[4];
+ u8 data[];
+} __packed;
+
+struct espi_perif_cmplt {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+ u8 data[];
+} __packed;
+
+struct espi_oob_msg {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+ u8 data[];
+} __packed;
+
+struct espi_flash_rwe {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+ u32 addr_be;
+ u8 data[];
+} __packed;
+
+struct espi_flash_cmplt {
+ u8 cyc;
+ u8 len_h : 4;
+ u8 tag : 4;
+ u8 len_l;
+ u8 data[];
+} __packed;
+
+struct aspeed_espi_ioc {
+ u32 pkt_len;
+ u8 *pkt;
+};
+
+#define ESPI_LEN_HIGH(len) (((len) >> 0x08) & 0x0F)
+#define ESPI_LEN_LOW(len) ((len) & 0xFF)
+#define ESPI_LEN(len_h, len_l) ((((len_h) << 0x08) & 0x0F00) | ((len_l) & 0xFF))
+
+/*
+ * We choose the longest header and the max payload size based on the Intel
+ * specification to define the maximum eSPI packet length.
+ */
+#define ASPEED_ESPI_PLD_LEN_MIN BIT(6)
+#define ASPEED_ESPI_PLD_LEN_MAX BIT(12)
+#define ASPEED_ESPI_PKT_LEN_MAX (sizeof(struct espi_perif_msg) + ASPEED_ESPI_PLD_LEN_MAX)
+
+#define __ASPEED_ESPI_IOCTL_MAGIC 0xb8
+
+/*
+ * The IOCTL-based interface works in the eSPI packet in/out paradigm.
+ *
+ * Only the virtual wire IOCTL is a special case which does not send
+ * or receive an eSPI packet. However, to keep a more consistent interface for
+ * userspace, we make all four channel drivers serve through the
+ * IOCTL interface.
+ *
+ * For the eSPI packet format, refer to
+ * Section 5.1 Cycle Types and Packet Format,
+ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016.
+ *
+ * For the example user apps using these IOCTL, refer to
+ * https://github.com/AspeedTech-BMC/aspeed_app/tree/master/espi_test
+ */
+
+/*
+ * Peripheral Channel (CH0)
+ * - ASPEED_ESPI_PERIF_PC_GET_RX
+ * Receive an eSPI Posted/Completion packet
+ * - ASPEED_ESPI_PERIF_PC_PUT_TX
+ * Transmit an eSPI Posted/Completion packet
+ * - ASPEED_ESPI_PERIF_NP_PUT_TX
+ * Transmit an eSPI Non-Posted packet
+ */
+#define ASPEED_ESPI_PERIF_PC_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x00, struct aspeed_espi_ioc)
+#define ASPEED_ESPI_PERIF_PC_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x01, struct aspeed_espi_ioc)
+#define ASPEED_ESPI_PERIF_NP_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x02, struct aspeed_espi_ioc)
+/*
+ * Virtual Wire Channel (CH1)
+ * - ASPEED_ESPI_VW_GET_GPIO_VAL
+ * Read the input value of GPIO over the VW channel
+ * - ASPEED_ESPI_VW_PUT_GPIO_VAL
+ * Write the output value of GPIO over the VW channel
+ */
+#define ASPEED_ESPI_VW_GET_GPIO_VAL _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x10, u8)
+#define ASPEED_ESPI_VW_PUT_GPIO_VAL _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x11, u8)
+/*
+ * Out-of-band Channel (CH2)
+ * - ASPEED_ESPI_OOB_GET_RX
+ * Receive an eSPI OOB packet
+ * - ASPEED_ESPI_OOB_PUT_TX
+ * Transmit an eSPI OOB packet
+ */
+#define ASPEED_ESPI_OOB_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x20, struct aspeed_espi_ioc)
+#define ASPEED_ESPI_OOB_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x21, struct aspeed_espi_ioc)
+/*
+ * Flash Channel (CH3)
+ * - ASPEED_ESPI_FLASH_GET_RX
+ * Receive an eSPI flash packet
+ * - ASPEED_ESPI_FLASH_PUT_TX
+ * Transmit an eSPI flash packet
+ */
+#define ASPEED_ESPI_FLASH_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x30, struct aspeed_espi_ioc)
+#define ASPEED_ESPI_FLASH_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
+ 0x31, struct aspeed_espi_ioc)
+
+#endif
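
A hedged userspace sketch of pulling a single OOB packet through this interface. The /dev/aspeed-espi-oob node name is an assumption, and since the structures above use the kernel u8/u32 spellings, the sketch maps those names before including the header.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/types.h>
/* The ioc header uses kernel-internal type names; map them for userspace. */
typedef __u8 u8;
typedef __u32 u32;
#include <linux/aspeed-espi-ioc.h>

int main(void)
{
	struct aspeed_espi_ioc ioc;
	u8 pkt[ASPEED_ESPI_PKT_LEN_MAX];
	/* Device node name is an assumption for this sketch. */
	int fd = open("/dev/aspeed-espi-oob", O_RDWR);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	memset(&ioc, 0, sizeof(ioc));
	ioc.pkt = pkt;
	ioc.pkt_len = sizeof(pkt);

	/* Asks the OOB channel driver for one received eSPI packet. */
	if (ioctl(fd, ASPEED_ESPI_OOB_GET_RX, &ioc) < 0) {
		perror("ASPEED_ESPI_OOB_GET_RX");
		close(fd);
		return EXIT_FAILURE;
	}

	printf("received %u bytes, cycle type 0x%02x\n", ioc.pkt_len, pkt[0]);
	close(fd);
	return EXIT_SUCCESS;
}
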
diff --git a/include/uapi/linux/aspeed-lpc-mbox.h b/include/uapi/linux/aspeed-lpc-mbox.h
new file mode 100644
index 000000000000..dbb8a7f24222
--- /dev/null
+++ b/include/uapi/linux/aspeed-lpc-mbox.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
+/* Copyright (c) 2021 Intel Corporation */
+
+struct aspeed_mbox_ioctl_data {
+ unsigned int data;
+};
+
+#define ASPEED_MBOX_IOCTL_BASE 0xA3
+
+#define ASPEED_MBOX_SIZE \
+ _IOR(ASPEED_MBOX_IOCTL_BASE, 0, struct aspeed_mbox_ioctl_data)
diff --git a/include/uapi/linux/aspeed-lpc-sio.h b/include/uapi/linux/aspeed-lpc-sio.h
new file mode 100644
index 000000000000..0a4ae34a8ed1
--- /dev/null
+++ b/include/uapi/linux/aspeed-lpc-sio.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012-2020 ASPEED Technology Inc.
+ * Copyright (c) 2017 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ASPEED_LPC_SIO_H
+#define _UAPI_LINUX_ASPEED_LPC_SIO_H
+
+#include <linux/ioctl.h>
+
+enum ACPI_SLP_STATE {
+ ACPI_STATE_S12 = 1,
+ ACPI_STATE_S3I,
+ ACPI_STATE_S45
+};
+
+/* SWC & ACPI for SuperIO IOCTL */
+enum SIO_CMD {
+ SIO_GET_ACPI_STATE = 0,
+ SIO_GET_PWRGD_STATUS,
+ SIO_GET_ONCTL_STATUS,
+ SIO_SET_ONCTL_GPIO,
+ SIO_GET_PWRBTN_OVERRIDE,
+ SIO_GET_PFAIL_STATUS, /* Start from AC Loss */
+ SIO_SET_BMC_SCI_EVENT,
+ SIO_SET_BMC_SMI_EVENT,
+
+ SIO_MAX_CMD
+};
+
+struct sio_ioctl_data {
+ unsigned short sio_cmd;
+ unsigned short param;
+ unsigned int data;
+};
+
+#define SIO_IOC_BASE 'P'
+#define SIO_IOC_COMMAND _IOWR(SIO_IOC_BASE, 1, struct sio_ioctl_data)
+
+#endif /* _UAPI_LINUX_ASPEED_LPC_SIO_H */
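
A small userspace sketch of issuing one of these commands; the /dev/lpc-sio node name is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/aspeed-lpc-sio.h>

int main(void)
{
	struct sio_ioctl_data sio = {
		.sio_cmd = SIO_GET_ACPI_STATE,
		.param = 0,
	};
	/* Device node name is an assumption for this sketch. */
	int fd = open("/dev/lpc-sio", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SIO_IOC_COMMAND, &sio) < 0) {
		perror("SIO_IOC_COMMAND");
		close(fd);
		return 1;
	}
	/* The driver returns the requested value in the data field. */
	printf("ACPI sleep state code: %u\n", sio.data);
	close(fd);
	return 0;
}
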
diff --git a/include/uapi/linux/aspeed-mctp.h b/include/uapi/linux/aspeed-mctp.h
new file mode 100644
index 000000000000..678ec3d9f1cc
--- /dev/null
+++ b/include/uapi/linux/aspeed-mctp.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Intel Corporation */
+
+#ifndef _UAPI_LINUX_ASPEED_MCTP_H
+#define _UAPI_LINUX_ASPEED_MCTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * aspeed-mctp is a simple device driver exposing a read/write interface:
+ * +----------------------+
+ * | PCIe VDM Header | 16 bytes (Big Endian)
+ * +----------------------+
+ * | MCTP Message Payload | 64/128/256/512 bytes (Big Endian)
+ * +----------------------+
+ *
+ * MCTP packet description can be found in DMTF DSP0238,
+ * MCTP PCIe VDM Transport Specification.
+ */
+
+#define ASPEED_MCTP_PCIE_VDM_HDR_SIZE 16
+
+/*
+ * uevents generated by aspeed-mctp driver
+ */
+#define ASPEED_MCTP_READY "PCIE_READY"
+
+/*
+ * maximum possible number of struct eid_info elements stored in list
+ */
+#define ASPEED_MCTP_EID_INFO_MAX 256
+
+/*
+ * MCTP operations
+ * @ASPEED_MCTP_IOCTL_FILTER_EID: enable/disable filtering of incoming packets
+ * based on Endpoint ID (BROKEN)
+ * @ASPEED_MCTP_IOCTL_GET_BDF: read PCI bus/device/function of MCTP Controller
+ * @ASPEED_MCTP_IOCTL_GET_MEDIUM_ID: read MCTP physical medium identifier
+ * related to PCIe revision
+ * @ASPEED_MCTP_IOCTL_GET_MTU: read max transmission unit (in bytes)
+ * @ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER: register client as the default
+ * handler that receives all MCTP messages that were not dispatched to other
+ * clients
+ * @ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER: register client to receive all
+ * messages of the specified MCTP type or PCI vendor defined type
+ * @ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER: unregister client as handler
+ * for the specified MCTP type or PCI vendor defined message type
+ * @ASPEED_MCTP_IOCTL_GET_EID_INFO: read the list of existing endpoint mappings;
+ * returns a count which is the lesser of the requested count and the existing count
+ * @ASPEED_MCTP_IOCTL_SET_EID_INFO: write the list of endpoint mappings,
+ * overwriting already existing endpoint mappings
+ */
+
+struct aspeed_mctp_filter_eid {
+ __u8 eid;
+ bool enable;
+};
+
+struct aspeed_mctp_get_bdf {
+ __u16 bdf;
+};
+
+struct aspeed_mctp_get_medium_id {
+ __u8 medium_id;
+};
+
+struct aspeed_mctp_get_mtu {
+ __u8 mtu;
+};
+
+struct aspeed_mctp_type_handler_ioctl {
+ __u8 mctp_type; /* MCTP message type as per DSP0239 */
+ /* Below params must be 0 if mctp_type is not Vendor Defined PCI */
+ __u16 pci_vendor_id; /* PCI Vendor ID */
+ __u16 vendor_type; /* Vendor specific type */
+ __u16 vendor_type_mask; /* Mask applied to vendor type */
+};
+
+struct aspeed_mctp_eid_info {
+ __u8 eid;
+ __u16 bdf;
+};
+
+struct aspeed_mctp_eid_ext_info {
+ __u8 eid;
+ __u16 bdf;
+ __u8 domain_id;
+};
+
+struct aspeed_mctp_get_eid_info {
+ __u64 ptr;
+ __u16 count;
+ __u8 start_eid;
+};
+
+struct aspeed_mctp_set_eid_info {
+ __u64 ptr;
+ __u16 count;
+};
+
+#define ASPEED_MCTP_IOCTL_BASE 0x4d
+
+#define ASPEED_MCTP_IOCTL_FILTER_EID \
+ _IOW(ASPEED_MCTP_IOCTL_BASE, 0, struct aspeed_mctp_filter_eid)
+#define ASPEED_MCTP_IOCTL_GET_BDF \
+ _IOR(ASPEED_MCTP_IOCTL_BASE, 1, struct aspeed_mctp_get_bdf)
+#define ASPEED_MCTP_IOCTL_GET_MEDIUM_ID \
+ _IOR(ASPEED_MCTP_IOCTL_BASE, 2, struct aspeed_mctp_get_medium_id)
+#define ASPEED_MCTP_IOCTL_GET_MTU \
+ _IOR(ASPEED_MCTP_IOCTL_BASE, 3, struct aspeed_mctp_get_mtu)
+#define ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER \
+ _IO(ASPEED_MCTP_IOCTL_BASE, 4)
+#define ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER \
+ _IOW(ASPEED_MCTP_IOCTL_BASE, 6, struct aspeed_mctp_type_handler_ioctl)
+#define ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER \
+ _IOW(ASPEED_MCTP_IOCTL_BASE, 7, struct aspeed_mctp_type_handler_ioctl)
+#define ASPEED_MCTP_IOCTL_GET_EID_INFO \
+ _IOWR(ASPEED_MCTP_IOCTL_BASE, 8, struct aspeed_mctp_get_eid_info)
+#define ASPEED_MCTP_IOCTL_SET_EID_INFO \
+ _IOW(ASPEED_MCTP_IOCTL_BASE, 9, struct aspeed_mctp_set_eid_info)
+#define ASPEED_MCTP_IOCTL_GET_EID_EXT_INFO \
+ _IOW(ASPEED_MCTP_IOCTL_BASE, 10, struct aspeed_mctp_get_eid_info)
+#define ASPEED_MCTP_IOCTL_SET_EID_EXT_INFO \
+ _IOW(ASPEED_MCTP_IOCTL_BASE, 11, struct aspeed_mctp_set_eid_info)
+
+
+#endif /* _UAPI_LINUX_ASPEED_MCTP_H */
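
A hedged userspace sketch of the read/write interface described at the top of this header; the /dev/aspeed-mctp node name is an assumption, and the buffer size simply covers the VDM header plus the largest payload mentioned above.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/aspeed-mctp.h>

int main(void)
{
	struct aspeed_mctp_get_bdf bdf = { 0 };
	__u8 pkt[ASPEED_MCTP_PCIE_VDM_HDR_SIZE + 512];
	ssize_t len;
	/* Device node name is an assumption for this sketch. */
	int fd = open("/dev/aspeed-mctp", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, ASPEED_MCTP_IOCTL_GET_BDF, &bdf) == 0)
		printf("own BDF: 0x%04x\n", bdf.bdf);

	/* Receive all otherwise-unclaimed MCTP messages on this client. */
	if (ioctl(fd, ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER) < 0) {
		perror("REGISTER_DEFAULT_HANDLER");
		close(fd);
		return 1;
	}

	/* Each read() returns one PCIe VDM header plus the MCTP payload. */
	len = read(fd, pkt, sizeof(pkt));
	if (len > 0)
		printf("received %zd bytes\n", len);

	close(fd);
	return 0;
}
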
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index 92326ebde350..83e0f406fedc 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -75,6 +75,7 @@ struct i2c_msg {
__u16 flags;
#define I2C_M_RD 0x0001 /* guaranteed to be 0x0001! */
#define I2C_M_TEN 0x0010 /* use only if I2C_FUNC_10BIT_ADDR */
+#define I2C_M_HOLD 0x0100 /* for holding a mux path */
#define I2C_M_DMA_SAFE 0x0200 /* use only in kernel space */
#define I2C_M_RECV_LEN 0x0400 /* use only if I2C_FUNC_SMBUS_READ_BLOCK_DATA */
#define I2C_M_NO_RD_ACK 0x0800 /* use only if I2C_FUNC_PROTOCOL_MANGLING */
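
The new I2C_M_HOLD flag is driven from userspace through the usual I2C_RDWR ioctl. A hedged sketch follows; the device address, bus number, and the interpretation of the 16-bit payload as a hold time are assumptions based on this series, with the exact semantics defined by the i2c-core/i2c-mux changes elsewhere in the tree.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	uint8_t reg = 0x00, val = 0;
	uint16_t hold = 1000;	/* 16-bit payload consumed by the hold logic */
	struct i2c_msg msgs[3] = {
		/* Ordinary register read from an illustrative device at 0x50. */
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
		/* Trailing hold message asks the core to keep the path selected. */
		{ .addr = 0x50, .flags = I2C_M_HOLD, .len = sizeof(hold),
		  .buf = (uint8_t *)&hold },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 3 };
	int fd = open("/dev/i2c-1", O_RDWR);	/* bus number is illustrative */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("I2C_RDWR");
		close(fd);
		return 1;
	}
	printf("reg 0x%02x = 0x%02x (path held)\n", reg, val);
	close(fd);
	return 0;
}
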
diff --git a/include/uapi/linux/i3c/i3cdev.h b/include/uapi/linux/i3c/i3cdev.h
new file mode 100644
index 000000000000..0897313f5516
--- /dev/null
+++ b/include/uapi/linux/i3c/i3cdev.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
+ *
+ * Author: Vitor Soares <vitor.soares@synopsys.com>
+ */
+
+#ifndef _UAPI_I3C_DEV_H_
+#define _UAPI_I3C_DEV_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* IOCTL commands */
+#define I3C_DEV_IOC_MAGIC 0x07
+
+/**
+ * struct i3c_ioc_priv_xfer - I3C SDR ioctl private transfer
+ * @data: Holds pointer to userspace buffer with transmit data.
+ * @len: Length of the data buffer, in bytes.
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ */
+struct i3c_ioc_priv_xfer {
+ __u64 data;
+ __u16 len;
+ __u8 rnw;
+ __u8 pad[5];
+};
+
+
+#define I3C_PRIV_XFER_SIZE(N) \
+ ((((sizeof(struct i3c_ioc_priv_xfer)) * (N)) < (1 << _IOC_SIZEBITS)) \
+ ? ((sizeof(struct i3c_ioc_priv_xfer)) * (N)) : 0)
+
+#define I3C_IOC_PRIV_XFER(N) \
+ _IOC(_IOC_READ|_IOC_WRITE, I3C_DEV_IOC_MAGIC, 30, I3C_PRIV_XFER_SIZE(N))
+
+#endif
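
A short userspace sketch of a private write-then-read transfer through this ioctl; the device node name depends on how i3cdev exposes the device and is only a placeholder here.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/i3c/i3cdev.h>

int main(void)
{
	uint8_t tx_buf[2] = { 0x00, 0xaa };	/* illustrative register write */
	uint8_t rx_buf[4] = { 0 };
	struct i3c_ioc_priv_xfer xfers[2] = {
		{ .data = (uintptr_t)tx_buf, .len = sizeof(tx_buf), .rnw = 0 },
		{ .data = (uintptr_t)rx_buf, .len = sizeof(rx_buf), .rnw = 1 },
	};
	/* Placeholder node name; the real one depends on bus number and PID. */
	int fd = open("/dev/i3c-1-3c000000000", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, I3C_IOC_PRIV_XFER(2), xfers) < 0) {
		perror("I3C_IOC_PRIV_XFER");
		close(fd);
		return 1;
	}
	printf("read back 0x%02x 0x%02x\n", rx_buf[0], rx_buf[1]);
	close(fd);
	return 0;
}
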
diff --git a/include/uapi/linux/jtag.h b/include/uapi/linux/jtag.h
new file mode 100644
index 000000000000..b500266cfd61
--- /dev/null
+++ b/include/uapi/linux/jtag.h
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved. */
+/* Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com> */
+/* Copyright (c) 2019 Intel Corporation */
+
+#ifndef __UAPI_LINUX_JTAG_H
+#define __UAPI_LINUX_JTAG_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * JTAG_XFER_MODE: JTAG transfer mode. Used to set JTAG controller transfer mode
+ * This is bitmask for feature param in jtag_mode for ioctl JTAG_SIOCMODE
+ */
+#define JTAG_XFER_MODE 0
+/*
+ * JTAG_CONTROL_MODE: JTAG controller mode. Used to set JTAG controller mode
+ * This is bitmask for feature param in jtag_mode for ioctl JTAG_SIOCMODE
+ */
+#define JTAG_CONTROL_MODE 1
+/*
+ * JTAG_MASTER_OUTPUT_DISABLE: JTAG master mode output disable; used to
+ * allow other devices to own the JTAG bus.
+ * This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
+ */
+#define JTAG_MASTER_OUTPUT_DISABLE 0
+/*
+ * JTAG_MASTER_MODE: JTAG master mode. Used to set JTAG controller master mode
+ * This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
+ */
+#define JTAG_MASTER_MODE 1
+/*
+ * JTAG_XFER_HW_MODE: JTAG hardware mode. Used to set HW driven or bitbang
+ * mode. This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
+ */
+#define JTAG_XFER_HW_MODE 1
+/*
+ * JTAG_XFER_SW_MODE: JTAG software mode. Used to set SW driven or bitbang
+ * mode. This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
+ */
+#define JTAG_XFER_SW_MODE 0
+
+/**
+ * enum jtag_tapstate:
+ *
+ * @JTAG_STATE_TLRESET: JTAG state machine Test Logic Reset state
+ * @JTAG_STATE_IDLE: JTAG state machine IDLE state
+ * @JTAG_STATE_SELECTDR: JTAG state machine SELECT_DR state
+ * @JTAG_STATE_CAPTUREDR: JTAG state machine CAPTURE_DR state
+ * @JTAG_STATE_SHIFTDR: JTAG state machine SHIFT_DR state
+ * @JTAG_STATE_EXIT1DR: JTAG state machine EXIT-1 DR state
+ * @JTAG_STATE_PAUSEDR: JTAG state machine PAUSE_DR state
+ * @JTAG_STATE_EXIT2DR: JTAG state machine EXIT-2 DR state
+ * @JTAG_STATE_UPDATEDR: JTAG state machine UPDATE DR state
+ * @JTAG_STATE_SELECTIR: JTAG state machine SELECT_IR state
+ * @JTAG_STATE_CAPTUREIR: JTAG state machine CAPTURE_IR state
+ * @JTAG_STATE_SHIFTIR: JTAG state machine SHIFT_IR state
+ * @JTAG_STATE_EXIT1IR: JTAG state machine EXIT-1 IR state
+ * @JTAG_STATE_PAUSEIR: JTAG state machine PAUSE_IR state
+ * @JTAG_STATE_EXIT2IR: JTAG state machine EXIT-2 IR state
+ * @JTAG_STATE_UPDATEIR: JTAG state machine UPDATE IR state
+ * @JTAG_STATE_CURRENT: JTAG current state, saved by driver
+ */
+enum jtag_tapstate {
+ JTAG_STATE_TLRESET,
+ JTAG_STATE_IDLE,
+ JTAG_STATE_SELECTDR,
+ JTAG_STATE_CAPTUREDR,
+ JTAG_STATE_SHIFTDR,
+ JTAG_STATE_EXIT1DR,
+ JTAG_STATE_PAUSEDR,
+ JTAG_STATE_EXIT2DR,
+ JTAG_STATE_UPDATEDR,
+ JTAG_STATE_SELECTIR,
+ JTAG_STATE_CAPTUREIR,
+ JTAG_STATE_SHIFTIR,
+ JTAG_STATE_EXIT1IR,
+ JTAG_STATE_PAUSEIR,
+ JTAG_STATE_EXIT2IR,
+ JTAG_STATE_UPDATEIR,
+ JTAG_STATE_CURRENT
+};
+
+/**
+ * enum jtag_reset:
+ *
+ * @JTAG_NO_RESET: JTAG run TAP from current state
+ * @JTAG_FORCE_RESET: JTAG force TAP to reset state
+ */
+enum jtag_reset {
+ JTAG_NO_RESET = 0,
+ JTAG_FORCE_RESET = 1,
+};
+
+/**
+ * enum jtag_xfer_type:
+ *
+ * @JTAG_SIR_XFER: SIR transfer
+ * @JTAG_SDR_XFER: SDR transfer
+ */
+enum jtag_xfer_type {
+ JTAG_SIR_XFER = 0,
+ JTAG_SDR_XFER = 1,
+};
+
+/**
+ * enum jtag_xfer_direction:
+ *
+ * @JTAG_READ_XFER: read transfer
+ * @JTAG_WRITE_XFER: write transfer
+ * @JTAG_READ_WRITE_XFER: read & write transfer
+ */
+enum jtag_xfer_direction {
+ JTAG_READ_XFER = 1,
+ JTAG_WRITE_XFER = 2,
+ JTAG_READ_WRITE_XFER = 3,
+};
+
+/**
+ * struct jtag_tap_state - forces JTAG state machine to go into a TAPC
+ * state
+ *
+ * @reset: 0 - run IDLE/PAUSE from current state
+ * 1 - go through TEST_LOGIC/RESET state before IDLE/PAUSE
+ * @from: current TAP state to transition from
+ * @endstate: TAP state to end in
+ * @tck: clock counter
+ *
+ * Structure provides interface to JTAG device for JTAG set state execution.
+ */
+struct jtag_tap_state {
+ __u8 reset;
+ __u8 from;
+ __u8 endstate;
+ __u8 tck;
+};
+
+/**
+ * union pad_config - Padding Configuration:
+ *
+ * @pre_pad_number: Number of prepadding bits, bit[11:0]
+ * @post_pad_number: Number of postpadding bits, bit[23:12]
+ * @pad_data: Bit value to be used by pre and post padding, bit[24]
+ * @int_value: unsigned int packed padding configuration value, bit[31:0]
+ *
+ * Structure provides pre and post padding configuration in a single __u32
+ */
+union pad_config {
+ struct {
+ __u32 pre_pad_number : 12;
+ __u32 post_pad_number : 12;
+ __u32 pad_data : 1;
+ __u32 rsvd : 7;
+ };
+ __u32 int_value;
+};
+
+/**
+ * struct jtag_xfer - jtag xfer:
+ *
+ * @type: transfer type
+ * @direction: xfer direction
+ * @from: xfer current state
+ * @endstate: xfer end state
+ * @padding: xfer padding
+ * @length: xfer bits length
+ * @tdio : xfer data array
+ *
+ * Structure provides interface to JTAG device for JTAG SDR/SIR xfer execution.
+ */
+struct jtag_xfer {
+ __u8 type;
+ __u8 direction;
+ __u8 from;
+ __u8 endstate;
+ __u32 padding;
+ __u32 length;
+ __u64 tdio;
+};
+
+/**
+ * struct bitbang_packet - jtag bitbang array packet:
+ *
+ * @data: JTAG Bitbang struct array pointer (input/output)
+ * @length: array size (input)
+ *
+ * Structure provides interface to JTAG device for JTAG bitbang bundle execution
+ */
+struct bitbang_packet {
+ struct tck_bitbang *data;
+ __u32 length;
+} __attribute__((__packed__));
+
+/**
+ * struct tck_bitbang - jtag bitbang:
+ *
+ * @tms: JTAG TMS
+ * @tdi: JTAG TDI (input)
+ * @tdo: JTAG TDO (output)
+ *
+ * Structure provides interface to JTAG device for JTAG bitbang execution.
+ */
+struct tck_bitbang {
+ __u8 tms;
+ __u8 tdi;
+ __u8 tdo;
+} __attribute__((__packed__));
+
+/**
+ * struct jtag_mode - jtag mode:
+ *
+ * @feature: 0 - JTAG feature setting selector for JTAG controller HW/SW
+ * 1 - JTAG feature setting selector for controller bus master
+ * mode output (enable / disable).
+ * @mode: (0 - SW / 1 - HW) for JTAG_XFER_MODE feature(0)
+ * (0 - output disable / 1 - output enable) for JTAG_CONTROL_MODE
+ * feature(1)
+ *
+ * Structure provides configuration modes to JTAG device.
+ */
+struct jtag_mode {
+ __u32 feature;
+ __u32 mode;
+};
+
+/* ioctl interface */
+#define __JTAG_IOCTL_MAGIC 0xb2
+
+#define JTAG_SIOCSTATE _IOW(__JTAG_IOCTL_MAGIC, 0, struct jtag_tap_state)
+#define JTAG_SIOCFREQ _IOW(__JTAG_IOCTL_MAGIC, 1, unsigned int)
+#define JTAG_GIOCFREQ _IOR(__JTAG_IOCTL_MAGIC, 2, unsigned int)
+#define JTAG_IOCXFER _IOWR(__JTAG_IOCTL_MAGIC, 3, struct jtag_xfer)
+#define JTAG_GIOCSTATUS _IOWR(__JTAG_IOCTL_MAGIC, 4, enum jtag_tapstate)
+#define JTAG_SIOCMODE _IOW(__JTAG_IOCTL_MAGIC, 5, unsigned int)
+#define JTAG_IOCBITBANG _IOW(__JTAG_IOCTL_MAGIC, 6, unsigned int)
+
+/**
+ * struct tms_cycle - This structure represents a tms cycle state.
+ *
+ * @tmsbits: is the bitwise representation of the needed tms transitions to
+ * move from one state to another.
+ * @count: number of jumps needed to move to the needed state.
+ *
+ */
+struct tms_cycle {
+ unsigned char tmsbits;
+ unsigned char count;
+};
+
+/*
+ * This is the complete set TMS cycles for going from any TAP state to any
+ * other TAP state, following a "shortest path" rule.
+ */
+static const struct tms_cycle _tms_cycle_lookup[][16] = {
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* TLR */{{0x00, 0}, {0x00, 1}, {0x02, 2}, {0x02, 3}, {0x02, 4}, {0x0a, 4},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x0a, 5}, {0x2a, 6}, {0x1a, 5}, {0x06, 3}, {0x06, 4}, {0x06, 5},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x16, 5}, {0x16, 6}, {0x56, 7}, {0x36, 6} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* RTI */{{0x07, 3}, {0x00, 0}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x05, 4}, {0x15, 5}, {0x0d, 4}, {0x03, 2}, {0x03, 3}, {0x03, 4},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x1b, 5} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* SelDR*/{{0x03, 2}, {0x03, 3}, {0x00, 0}, {0x00, 1}, {0x00, 2}, {0x02, 2},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x02, 3}, {0x0a, 4}, {0x06, 3}, {0x01, 1}, {0x01, 2}, {0x01, 3},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x05, 3}, {0x05, 4}, {0x15, 5}, {0x0d, 4} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* CapDR*/{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x00, 0}, {0x00, 1}, {0x01, 1},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x01, 2}, {0x05, 3}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* SDR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x00, 0}, {0x01, 1},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x01, 2}, {0x05, 3}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* Ex1DR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x02, 3}, {0x00, 0},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x00, 1}, {0x02, 2}, {0x01, 1}, {0x07, 3}, {0x07, 4}, {0x07, 5},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x17, 5}, {0x17, 6}, {0x57, 7}, {0x37, 6} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* PDR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x01, 2}, {0x05, 3},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x00, 0}, {0x01, 1}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* Ex2DR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x00, 1}, {0x02, 2},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x02, 3}, {0x00, 0}, {0x01, 1}, {0x07, 3}, {0x07, 4}, {0x07, 5},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x17, 5}, {0x17, 6}, {0x57, 7}, {0x37, 6} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* UpdDR*/{{0x07, 3}, {0x00, 1}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x05, 4}, {0x15, 5}, {0x00, 0}, {0x03, 2}, {0x03, 3}, {0x03, 4},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x1b, 5} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* SelIR*/{{0x01, 1}, {0x01, 2}, {0x05, 3}, {0x05, 4}, {0x05, 5}, {0x15, 5},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x15, 6}, {0x55, 7}, {0x35, 6}, {0x00, 0}, {0x00, 1}, {0x00, 2},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x02, 2}, {0x02, 3}, {0x0a, 4}, {0x06, 3} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* CapIR*/{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x00, 0}, {0x00, 1},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x01, 1}, {0x01, 2}, {0x05, 3}, {0x03, 2} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* SIR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x0f, 5}, {0x00, 0},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x01, 1}, {0x01, 2}, {0x05, 3}, {0x03, 2} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* Ex1IR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x03, 4}, {0x0b, 4},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x0b, 5}, {0x2b, 6}, {0x1b, 5}, {0x07, 3}, {0x07, 4}, {0x02, 3},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x00, 0}, {0x00, 1}, {0x02, 2}, {0x01, 1} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* PIR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x0f, 5}, {0x01, 2},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x05, 3}, {0x00, 0}, {0x01, 1}, {0x03, 2} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* Ex2IR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x03, 4}, {0x0b, 4},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x0b, 5}, {0x2b, 6}, {0x1b, 5}, {0x07, 3}, {0x07, 4}, {0x00, 1},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x02, 2}, {0x02, 3}, {0x00, 0}, {0x01, 1} },
+
+/* TLR RTI SelDR CapDR SDR Ex1DR*/
+/* UpdIR*/{{0x07, 3}, {0x00, 1}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
+ {0x05, 4}, {0x15, 5}, {0x0d, 4}, {0x03, 2}, {0x03, 3}, {0x03, 4},
+/* Ex1IR PIR Ex2IR UpdIR*/
+ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x00, 0} },
+};
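+
+/*
+ * Illustrative sketch (not part of this header): a driver would typically
+ * consume an entry by clocking out @count TMS bits from @tmsbits, LSB first:
+ *
+ *	const struct tms_cycle *cycle = &_tms_cycle_lookup[from][to];
+ *	int i;
+ *
+ *	for (i = 0; i < cycle->count; i++)
+ *		jtag_tck_cycle((cycle->tmsbits >> i) & 1);	// hypothetical helper
+ */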
+
+#endif /* __UAPI_LINUX_JTAG_H */
diff --git a/include/uapi/linux/jtag_drv.h b/include/uapi/linux/jtag_drv.h
new file mode 100644
index 000000000000..4df638f8fa43
--- /dev/null
+++ b/include/uapi/linux/jtag_drv.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2017 ASPEED Technology Inc. */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef __JTAG_DRV_H__
+#define __JTAG_DRV_H__
+
+enum xfer_mode {
+ HW_MODE = 0,
+ SW_MODE
+};
+
+struct tck_bitbang {
+ __u8 tms;
+ __u8 tdi;
+ __u8 tdo;
+} __attribute__((__packed__));
+
+struct scan_xfer {
+ __u8 mode;
+ __u32 tap_state;
+ __u32 length;
+ __u8 *tdi;
+ __u32 tdi_bytes;
+ __u8 *tdo;
+ __u32 tdo_bytes;
+ __u32 end_tap_state;
+} __attribute__((__packed__));
+
+struct set_tck_param {
+ __u8 mode;
+ __u32 tck;
+} __attribute__((__packed__));
+
+struct get_tck_param {
+ __u8 mode;
+ __u32 tck;
+} __attribute__((__packed__));
+
+struct tap_state_param {
+ __u8 mode;
+ __u32 from_state;
+ __u32 to_state;
+} __attribute__((__packed__));
+
+enum jtag_states {
+ jtag_tlr,
+ jtag_rti,
+ jtag_sel_dr,
+ jtag_cap_dr,
+ jtag_shf_dr,
+ jtag_ex1_dr,
+ jtag_pau_dr,
+ jtag_ex2_dr,
+ jtag_upd_dr,
+ jtag_sel_ir,
+ jtag_cap_ir,
+ jtag_shf_ir,
+ jtag_ex1_ir,
+ jtag_pau_ir,
+ jtag_ex2_ir,
+ jtag_upd_ir
+};
+
+#define JTAGIOC_BASE 'T'
+
+#define AST_JTAG_SET_TCK _IOW(JTAGIOC_BASE, 3, struct set_tck_param)
+#define AST_JTAG_GET_TCK _IOR(JTAGIOC_BASE, 4, struct get_tck_param)
+#define AST_JTAG_BITBANG _IOWR(JTAGIOC_BASE, 5, struct tck_bitbang)
+#define AST_JTAG_SET_TAPSTATE _IOW(JTAGIOC_BASE, 6, struct tap_state_param)
+#define AST_JTAG_READWRITESCAN _IOWR(JTAGIOC_BASE, 7, struct scan_xfer)
+
+#endif
diff --git a/include/uapi/linux/peci-ioctl.h b/include/uapi/linux/peci-ioctl.h
new file mode 100644
index 000000000000..181559c0655d
--- /dev/null
+++ b/include/uapi/linux/peci-ioctl.h
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (c) 2018-2020 Intel Corporation */
+
+#ifndef __PECI_IOCTL_H
+#define __PECI_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* The PECI client's default address of 0x30 */
+#define PECI_BASE_ADDR 0x30
+
+/* Max number of CPU clients */
+#define PECI_OFFSET_MAX 8
+
+/* Max number of domains per CPU */
+#define DOMAIN_OFFSET_MAX 16
+
+/* PECI read/write data buffer size max */
+#define PECI_BUFFER_SIZE 255
+
+/* Device Specific Completion Code (CC) Definition */
+#define PECI_DEV_CC_SUCCESS 0x40
+#define PECI_DEV_CC_NEED_RETRY 0x80
+#define PECI_DEV_CC_OUT_OF_RESOURCE 0x81
+#define PECI_DEV_CC_UNAVAIL_RESOURCE 0x82
+#define PECI_DEV_CC_INVALID_REQ 0x90
+#define PECI_DEV_CC_MCA_ERROR 0x91
+#define PECI_DEV_CC_CATASTROPHIC_MCA_ERROR 0x93
+#define PECI_DEV_CC_FATAL_MCA_DETECTED 0x94
+#define PECI_DEV_CC_PARITY_ERROR_ON_GPSB_OR_PMSB 0x98
+#define PECI_DEV_CC_PARITY_ERROR_ON_GPSB_OR_PMSB_IERR 0x9B
+#define PECI_DEV_CC_PARITY_ERROR_ON_GPSB_OR_PMSB_MCA 0x9C
+
+/* Completion Code mask to check retry needs */
+#define PECI_DEV_CC_RETRY_CHECK_MASK 0xf0
+
+#define PECI_DEV_RETRY_TIMEOUT msecs_to_jiffies(700)
+#define PECI_DEV_RETRY_INTERVAL_MIN_USEC 100
+#define PECI_DEV_RETRY_INTERVAL_MAX_USEC (128 * 1000)
+#define PECI_DEV_RETRY_BIT 0x01
+
+/**
+ * enum peci_cmd - PECI client commands
+ * @PECI_CMD_XFER: raw PECI transfer
+ * @PECI_CMD_PING: ping, a required message for all PECI devices
+ * @PECI_CMD_GET_DIB: get DIB (Device Info Byte)
+ * @PECI_CMD_GET_TEMP: get maximum die temperature
+ * @PECI_CMD_RD_PKG_CFG: read access to the PCS (Package Configuration Space)
+ * @PECI_CMD_WR_PKG_CFG: write access to the PCS (Package Configuration Space)
+ * @PECI_CMD_RD_IA_MSR: read access to MSRs (Model Specific Registers)
+ * @PECI_CMD_WR_IA_MSR: write access to MSRs (Model Specific Registers)
+ * @PECI_CMD_RD_IA_MSREX: read access to MSRs (Model Specific Registers)
+ * @PECI_CMD_RD_PCI_CFG: sideband read access to the PCI configuration space
+ * maintained in downstream devices external to the processor
+ * @PECI_CMD_WR_PCI_CFG: sideband write access to the PCI configuration space
+ * maintained in downstream devices external to the processor
+ * @PECI_CMD_RD_PCI_CFG_LOCAL: sideband read access to the PCI configuration
+ * space that resides within the processor
+ * @PECI_CMD_WR_PCI_CFG_LOCAL: sideband write access to the PCI configuration
+ * space that resides within the processor
+ * @PECI_CMD_RD_END_PT_CFG: read access to the PCI configuration or MMIO space
+ * of an endpoint device
+ * @PECI_CMD_WR_END_PT_CFG: write access to the PCI configuration or MMIO space
+ * of an endpoint device
+ * @PECI_CMD_CRASHDUMP_DISC: crashdump discovery
+ * @PECI_CMD_CRASHDUMP_GET_FRAME: crashdump frame read
+ * @PECI_CMD_MAX: number of PECI client commands
+ *
+ * Available commands depend on the client's PECI revision.
+ */
+enum peci_cmd {
+ PECI_CMD_XFER = 0,
+ PECI_CMD_PING,
+ PECI_CMD_GET_DIB,
+ PECI_CMD_GET_TEMP,
+ PECI_CMD_RD_PKG_CFG,
+ PECI_CMD_WR_PKG_CFG,
+ PECI_CMD_RD_IA_MSR,
+ PECI_CMD_WR_IA_MSR,
+ PECI_CMD_RD_IA_MSREX,
+ PECI_CMD_RD_PCI_CFG,
+ PECI_CMD_WR_PCI_CFG,
+ PECI_CMD_RD_PCI_CFG_LOCAL,
+ PECI_CMD_WR_PCI_CFG_LOCAL,
+ PECI_CMD_RD_END_PT_CFG,
+ PECI_CMD_WR_END_PT_CFG,
+ PECI_CMD_CRASHDUMP_DISC,
+ PECI_CMD_CRASHDUMP_GET_FRAME,
+ PECI_CMD_MAX
+};
+
+/**
+ * struct peci_xfer_msg - raw PECI transfer command
+ * @addr: address of the client
+ * @tx_len: number of bytes to be written
+ * @rx_len: number of bytes to be read
+ * @tx_buf: buffer holding the data to be written, or NULL
+ * @rx_buf: buffer for the data to be read, or NULL
+ *
+ * Describes a raw PECI transfer.
+ */
+struct peci_xfer_msg {
+ __u8 addr;
+ __u8 tx_len;
+ __u8 rx_len;
+ __u8 padding;
+ __u8 *tx_buf;
+ __u8 *rx_buf;
+} __attribute__((__packed__));
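+
+/*
+ * Illustrative sketch (fd is assumed to be an already opened PECI character
+ * device): a raw transfer with no transmit and no receive payload amounts to
+ * a Ping() of the addressed client.
+ *
+ *	struct peci_xfer_msg msg = { 0 };
+ *
+ *	msg.addr = PECI_BASE_ADDR;	// default client address, 0x30
+ *	msg.tx_len = 0;			// no write payload ...
+ *	msg.rx_len = 0;			// ... and no read payload
+ *	if (ioctl(fd, PECI_IOC_XFER, &msg) < 0)
+ *		perror("PECI_IOC_XFER");
+ */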
+
+/**
+ * struct peci_ping_msg - ping command
+ * @addr: address of the client
+ *
+ * Ping() is a required message for all PECI devices. This message is used to
+ * enumerate devices or to determine if a device has been removed, powered
+ * off, etc.
+ */
+struct peci_ping_msg {
+ __u8 addr;
+ __u8 padding[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_get_dib_msg - GetDIB command
+ * @addr: address of the client
+ * @dib: DIB data to be read
+ *
+ * The processor PECI client implementation of GetDIB() includes an 8-byte
+ * response and provides information regarding client revision number and the
+ * number of supported domains. All processor PECI clients support the GetDIB()
+ * command.
+ */
+struct peci_get_dib_msg {
+#define PECI_GET_DIB_WR_LEN 1
+#define PECI_GET_DIB_RD_LEN 8
+#define PECI_GET_DIB_CMD 0xf7
+
+ __u8 addr;
+ __u8 padding[3];
+ __u64 dib;
+} __attribute__((__packed__));
+
+/**
+ * struct peci_get_temp_msg - GetTemp command
+ * @addr: address of the client
+ * @temp_raw: raw temperature data to be read
+ *
+ * The GetTemp() command is used to retrieve the maximum die temperature from a
+ * target PECI address. The temperature is used by the external thermal
+ * management system to regulate the temperature on the die. The data is
+ * returned as a negative value representing the number of degrees centigrade
+ * below the maximum processor junction temperature.
+ */
+struct peci_get_temp_msg {
+#define PECI_GET_TEMP_WR_LEN 1
+#define PECI_GET_TEMP_RD_LEN 2
+#define PECI_GET_TEMP_CMD 0x01
+
+ __u8 addr;
+ __u8 padding;
+ __s16 temp_raw;
+} __attribute__((__packed__));
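+
+/*
+ * Illustrative note (an assumption about typical PECI clients, not something
+ * defined by this header): @temp_raw is commonly a signed fixed-point value
+ * with 6 fractional bits, i.e. 1/64 degree Celsius per LSB, relative to the
+ * maximum junction temperature. With fd an already opened PECI device:
+ *
+ *	struct peci_get_temp_msg msg = { .addr = PECI_BASE_ADDR };
+ *
+ *	if (!ioctl(fd, PECI_IOC_GET_TEMP, &msg))
+ *		printf("thermal margin: %.2f C\n", msg.temp_raw / 64.0);
+ */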
+
+/**
+ * struct peci_rd_pkg_cfg_msg - RdPkgConfig command
+ * @addr: address of the client
+ * @index: encoding index for the requested service
+ * @param: specific data being requested
+ * @rx_len: number of bytes to be read
+ * @cc: completion code
+ * @pkg_config: package config data to be read
+ * @domain_id: domain ID of the client
+ *
+ * The RdPkgConfig() command provides read access to the Package Configuration
+ * Space (PCS) within the processor, including various power and thermal
+ * management functions. Typical PCS read services supported by the processor
+ * may include access to temperature data, energy status, run time information,
+ * DIMM temperatures and so on.
+ */
+struct peci_rd_pkg_cfg_msg {
+#define PECI_RDPKGCFG_WRITE_LEN 5
+#define PECI_RDPKGCFG_READ_LEN_BASE 1
+#define PECI_RDPKGCFG_CMD 0xa1
+
+ __u8 addr;
+ __u8 index;
+#define PECI_MBX_INDEX_CPU_ID 0 /* Package Identifier Read */
+#define PECI_MBX_INDEX_VR_DEBUG 1 /* VR Debug */
+#define PECI_MBX_INDEX_PKG_TEMP_READ 2 /* Package Temperature Read */
+#define PECI_MBX_INDEX_ENERGY_COUNTER 3 /* Energy counter */
+#define PECI_MBX_INDEX_ENERGY_STATUS 4 /* DDR Energy Status */
+#define PECI_MBX_INDEX_WAKE_MODE_BIT 5 /* "Wake on PECI" Mode bit */
+#define PECI_MBX_INDEX_EPI 6 /* Efficient Performance Indication */
+#define PECI_MBX_INDEX_PKG_RAPL_PERF 8 /* Pkg RAPL Performance Status Read */
+#define PECI_MBX_INDEX_MODULE_TEMP 9 /* Module Temperature Read */
+#define PECI_MBX_INDEX_DTS_MARGIN 10 /* DTS thermal margin */
+#define PECI_MBX_INDEX_SKT_PWR_THRTL_DUR 11 /* Socket Power Throttled Duration */
+#define PECI_MBX_INDEX_CFG_TDP_CONTROL 12 /* TDP Config Control */
+#define PECI_MBX_INDEX_CFG_TDP_LEVELS 13 /* TDP Config Levels */
+#define PECI_MBX_INDEX_DDR_DIMM_TEMP 14 /* DDR DIMM Temperature */
+#define PECI_MBX_INDEX_CFG_ICCMAX 15 /* Configurable ICCMAX */
+#define PECI_MBX_INDEX_TEMP_TARGET 16 /* Temperature Target Read */
+#define PECI_MBX_INDEX_CURR_CFG_LIMIT 17 /* Current Config Limit */
+#define PECI_MBX_INDEX_DIMM_TEMP_READ 20 /* Package Thermal Status Read */
+#define PECI_MBX_INDEX_DRAM_IMC_TMP_READ 22 /* DRAM IMC Temperature Read */
+#define PECI_MBX_INDEX_DDR_CH_THERM_STAT 23 /* DDR Channel Thermal Status */
+#define PECI_MBX_INDEX_PKG_POWER_LIMIT1 26 /* Package Power Limit1 */
+#define PECI_MBX_INDEX_PKG_POWER_LIMIT2 27 /* Package Power Limit2 */
+#define PECI_MBX_INDEX_TDP 28 /* Thermal design power minimum */
+#define PECI_MBX_INDEX_TDP_HIGH 29 /* Thermal design power maximum */
+#define PECI_MBX_INDEX_TDP_UNITS 30 /* Units for power/energy registers */
+#define PECI_MBX_INDEX_RUN_TIME 31 /* Accumulated Run Time */
+#define PECI_MBX_INDEX_CONSTRAINED_TIME 32 /* Thermally Constrained Time Read */
+#define PECI_MBX_INDEX_TURBO_RATIO 33 /* Turbo Activation Ratio */
+#define PECI_MBX_INDEX_DDR_RAPL_PL1 34 /* DDR RAPL PL1 */
+#define PECI_MBX_INDEX_DDR_PWR_INFO_HIGH 35 /* DRAM Power Info Read (high) */
+#define PECI_MBX_INDEX_DDR_PWR_INFO_LOW 36 /* DRAM Power Info Read (low) */
+#define PECI_MBX_INDEX_DDR_RAPL_PL2 37 /* DDR RAPL PL2 */
+#define PECI_MBX_INDEX_DDR_RAPL_STATUS 38 /* DDR RAPL Performance Status */
+#define PECI_MBX_INDEX_DDR_HOT_ABSOLUTE 43 /* DDR Hottest Dimm Absolute Temp */
+#define PECI_MBX_INDEX_DDR_HOT_RELATIVE 44 /* DDR Hottest Dimm Relative Temp */
+#define PECI_MBX_INDEX_DDR_THROTTLE_TIME 45 /* DDR Throttle Time */
+#define PECI_MBX_INDEX_DDR_THERM_STATUS 46 /* DDR Thermal Status */
+#define PECI_MBX_INDEX_TIME_AVG_TEMP 47 /* Package time-averaged temperature */
+#define PECI_MBX_INDEX_TURBO_RATIO_LIMIT 49 /* Turbo Ratio Limit Read */
+#define PECI_MBX_INDEX_HWP_AUTO_OOB 53 /* HWP Autonomous Out-of-band */
+#define PECI_MBX_INDEX_DDR_WARM_BUDGET 55 /* DDR Warm Power Budget */
+#define PECI_MBX_INDEX_DDR_HOT_BUDGET 56 /* DDR Hot Power Budget */
+#define PECI_MBX_INDEX_PKG_PSYS_PWR_LIM3 57 /* Package/Psys Power Limit3 */
+#define PECI_MBX_INDEX_PKG_PSYS_PWR_LIM1 58 /* Package/Psys Power Limit1 */
+#define PECI_MBX_INDEX_PKG_PSYS_PWR_LIM2 59 /* Package/Psys Power Limit2 */
+#define PECI_MBX_INDEX_PKG_PSYS_PWR_LIM4 60 /* Package/Psys Power Limit4 */
+#define PECI_MBX_INDEX_PERF_LIMIT_REASON 65 /* Performance Limit Reasons */
+
+ __u16 param;
+/* When index is PECI_MBX_INDEX_CPU_ID */
+#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */
+#define PECI_PKG_POWER_SKU_UNIT 0x0000 /* Time, Energy, Power units */
+#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */
+#define PECI_PKG_ID_UNCORE_ID 0x0002 /* Uncore Device ID */
+#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */
+#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */
+#define PECI_PKG_ID_MACHINE_CHECK_STATUS 0x0005 /* Machine Check Status */
+#define PECI_PKG_ID_CPU_PACKAGE 0x00ff /* CPU package ID*/
+#define PECI_PKG_ID_DIMM 0x00ff /* DIMM ID*/
+#define PECI_PKG_ID_PLATFORM 0x00fe /* Entire platform ID */
+
+ __u8 rx_len;
+ __u8 cc;
+ __u8 padding[2];
+ __u8 pkg_config[4];
+ __u8 domain_id;
+ __u8 padding1[3];
+} __attribute__((__packed__));
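+
+/*
+ * Illustrative sketch (assumptions only: fd refers to an already opened PECI
+ * character device): reading the package temperature via RdPkgConfig() might
+ * look like this.
+ *
+ *	struct peci_rd_pkg_cfg_msg msg = { 0 };
+ *	__u32 value;
+ *
+ *	msg.addr = PECI_BASE_ADDR;			// first CPU client
+ *	msg.index = PECI_MBX_INDEX_PKG_TEMP_READ;	// package temperature
+ *	msg.param = 0;					// no extra parameter
+ *	msg.rx_len = 4;					// 32-bit response
+ *	if (!ioctl(fd, PECI_IOC_RD_PKG_CFG, &msg) &&
+ *	    msg.cc == PECI_DEV_CC_SUCCESS)
+ *		memcpy(&value, msg.pkg_config, sizeof(value));
+ */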
+
+/**
+ * struct peci_wr_pkg_cfg_msg - WrPkgConfig command
+ * @addr: address of the client
+ * @index: encoding index for the requested service
+ * @param: specific data being requested
+ * @tx_len: number of bytes to be written
+ * @cc: completion code
+ * @value: package config data to be written
+ * @domain_id: domain ID of the client
+ *
+ * The WrPkgConfig() command provides write access to the Package Configuration
+ * Space (PCS) within the processor, including various power and thermal
+ * management functions. Typical PCS write services supported by the processor
+ * may include power limiting, thermal averaging constant programming and so
+ * on.
+ */
+struct peci_wr_pkg_cfg_msg {
+#define PECI_WRPKGCFG_WRITE_LEN_BASE 6
+#define PECI_WRPKGCFG_READ_LEN 1
+#define PECI_WRPKGCFG_CMD 0xa5
+
+ __u8 addr;
+ __u8 index;
+#define PECI_MBX_INDEX_DIMM_AMBIENT 19
+#define PECI_MBX_INDEX_DIMM_TEMP 24
+
+ __u16 param;
+ __u8 tx_len;
+ __u8 cc;
+ __u8 padding[2];
+ __u32 value;
+ __u8 domain_id;
+ __u8 padding1[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_rd_ia_msr_msg - RdIAMSR command
+ * @addr: address of the client
+ * @thread_id: ID of the specific logical processor
+ * @address: address of MSR to read from
+ * @cc: completion code
+ * @value: data to be read
+ * @domain_id: domain ID of the client
+ *
+ * The RdIAMSR() PECI command provides read access to Model Specific Registers
+ * (MSRs) defined in the processor's Intel Architecture (IA).
+ */
+struct peci_rd_ia_msr_msg {
+#define PECI_RDIAMSR_WRITE_LEN 5
+#define PECI_RDIAMSR_READ_LEN 9
+#define PECI_RDIAMSR_CMD 0xb1
+
+ __u8 addr;
+ __u8 thread_id;
+ __u16 address;
+ __u8 cc;
+ __u8 padding[3];
+ __u64 value;
+ __u8 domain_id;
+ __u8 padding1[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_wr_ia_msr_msg - WrIAMSR command
+ * @addr: address of the client
+ * @thread_id: ID of the specific logical processor
+ * @address: address of MSR to write to
+ * @tx_len: number of bytes to be written
+ * @cc: completion code
+ * @value: data to be written
+ * @domain_id: domain ID of the client
+ *
+ * The WrIAMSR() PECI command provides write access to Model Specific Registers
+ * (MSRs) defined in the processor's Intel Architecture (IA).
+ */
+struct peci_wr_ia_msr_msg {
+#define PECI_WRIAMSR_CMD 0xb5
+
+ __u8 addr;
+ __u8 thread_id;
+ __u16 address;
+ __u8 tx_len;
+ __u8 cc;
+ __u8 padding[2];
+ __u64 value;
+ __u8 domain_id;
+ __u8 padding1[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_rd_ia_msrex_msg - RdIAMSREX command
+ * @addr: address of the client
+ * @thread_id: ID of the specific logical processor
+ * @address: address of MSR to read from
+ * @cc: completion code
+ * @value: data to be read
+ * @domain_id: domain ID of the client
+ *
+ * The RdIAMSREX() PECI command provides read access to Model Specific
+ * Registers (MSRs) defined in the processor's Intel Architecture (IA).
+ * The differences between RdIAMSREX() and RdIAMSR() are that:
+ * (1) RdIAMSR() can only read MC registers, RdIAMSREX() can read all MSRs
+ * (2) thread_id of RdIAMSR() is __u8, thread_id of RdIAMSREX() is __u16
+ */
+struct peci_rd_ia_msrex_msg {
+#define PECI_RDIAMSREX_WRITE_LEN 6
+#define PECI_RDIAMSREX_READ_LEN 9
+#define PECI_RDIAMSREX_CMD 0xd1
+
+ __u8 addr;
+ __u8 padding0;
+ __u16 thread_id;
+ __u16 address;
+ __u8 cc;
+ __u8 padding1;
+ __u64 value;
+ __u8 domain_id;
+ __u8 padding2[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_rd_pci_cfg_msg - RdPCIConfig command
+ * @addr: address of the client
+ * @bus: PCI bus number
+ * @device: PCI device number
+ * @function: specific function to read from
+ * @reg: specific register to read from
+ * @cc: completion code
+ * @pci_config: config data to be read
+ * @domain_id: domain ID of the client
+ *
+ * The RdPCIConfig() command provides sideband read access to the PCI
+ * configuration space maintained in downstream devices external to the
+ * processor.
+ */
+struct peci_rd_pci_cfg_msg {
+#define PECI_RDPCICFG_WRITE_LEN 6
+#define PECI_RDPCICFG_READ_LEN 5
+#define PECI_RDPCICFG_READ_LEN_MAX 24
+#define PECI_RDPCICFG_CMD 0x61
+
+ __u8 addr;
+ __u8 bus;
+#define PECI_PCI_BUS0_CPU0 0x00
+#define PECI_PCI_BUS0_CPU1 0x80
+#define PECI_PCI_CPUBUSNO_BUS 0x00
+#define PECI_PCI_CPUBUSNO_DEV 0x08
+#define PECI_PCI_CPUBUSNO_FUNC 0x02
+#define PECI_PCI_CPUBUSNO 0xcc
+#define PECI_PCI_CPUBUSNO_1 0xd0
+#define PECI_PCI_CPUBUSNO_VALID 0xd4
+
+ __u8 device;
+ __u8 function;
+ __u16 reg;
+ __u8 cc;
+ __u8 padding[1];
+ __u8 pci_config[4];
+ __u8 domain_id;
+ __u8 padding1[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_wr_pci_cfg_msg - WrPCIConfig command
+ * @addr: address of the client
+ * @bus: PCI bus number
+ * @device: PCI device number
+ * @function: specific function to write to
+ * @reg: specific register to write to
+ * @tx_len: number of bytes to be written
+ * @cc: completion code
+ * @pci_config: config data to be written
+ * @domain_id: domain ID of the client
+ *
+ * The WrPCIConfig() command provides sideband write access to the PCI
+ * configuration space maintained in downstream devices external to the
+ * processor.
+ */
+struct peci_wr_pci_cfg_msg {
+#define PECI_WRPCICFG_CMD 0x65
+
+ __u8 addr;
+ __u8 bus;
+ __u8 device;
+ __u8 function;
+ __u16 reg;
+ __u8 tx_len;
+ __u8 cc;
+ __u8 pci_config[4];
+ __u8 domain_id;
+ __u8 padding[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_rd_pci_cfg_local_msg - RdPCIConfigLocal command
+ * @addr: address of the client
+ * @bus: PCI bus number
+ * @device: PCI device number
+ * @function: specific function to read from
+ * @reg: specific register to read from
+ * @rx_len: number of bytes to be read
+ * @cc: completion code
+ * @pci_config: config data to be read
+ * @domain_id: domain ID of the client
+ *
+ * The RdPCIConfigLocal() command provides sideband read access to the PCI
+ * configuration space that resides within the processor. This includes all
+ * processor IIO and uncore registers within the PCI configuration space.
+ */
+struct peci_rd_pci_cfg_local_msg {
+#define PECI_RDPCICFGLOCAL_WRITE_LEN 5
+#define PECI_RDPCICFGLOCAL_READ_LEN_BASE 1
+#define PECI_RDPCICFGLOCAL_CMD 0xe1
+
+ __u8 addr;
+ __u8 bus;
+ __u8 device;
+ __u8 function;
+ __u16 reg;
+ __u8 rx_len;
+ __u8 cc;
+ __u8 pci_config[4];
+ __u8 domain_id;
+ __u8 padding[3];
+} __attribute__((__packed__));
+
+/**
+ * struct peci_wr_pci_cfg_local_msg - WrPCIConfigLocal command
+ * @addr: address of the client
+ * @bus: PCI bus number
+ * @device: PCI device number
+ * @function: specific function to write to
+ * @reg: specific register to write to
+ * @tx_len: number of bytes to be written
+ * @cc: completion code
+ * @value: config data to be written
+ * @domain_id: domain ID of the client
+ *
+ * The WrPCIConfigLocal() command provides sideband write access to the PCI
+ * configuration space that resides within the processor. PECI originators can
+ * access this space even before BIOS enumeration of the system buses.
+ */
+struct peci_wr_pci_cfg_local_msg {
+#define PECI_WRPCICFGLOCAL_WRITE_LEN_BASE 6
+#define PECI_WRPCICFGLOCAL_READ_LEN 1
+#define PECI_WRPCICFGLOCAL_CMD 0xe5
+
+ __u8 addr;
+ __u8 bus;
+ __u8 device;
+ __u8 function;
+ __u16 reg;
+ __u8 tx_len;
+ __u8 cc;
+ __u32 value;
+ __u8 domain_id;
+ __u8 padding[3];
+} __attribute__((__packed__));
+
+struct peci_rd_end_pt_cfg_msg {
+#define PECI_RDENDPTCFG_PCI_WRITE_LEN 12
+#define PECI_RDENDPTCFG_MMIO_D_WRITE_LEN 14
+#define PECI_RDENDPTCFG_MMIO_Q_WRITE_LEN 18
+#define PECI_RDENDPTCFG_READ_LEN_BASE 1
+#define PECI_RDENDPTCFG_CMD 0xc1
+
+ __u8 addr;
+ __u8 msg_type;
+#define PECI_ENDPTCFG_TYPE_LOCAL_PCI 0x03
+#define PECI_ENDPTCFG_TYPE_PCI 0x04
+#define PECI_ENDPTCFG_TYPE_MMIO 0x05
+
+ union {
+ struct {
+ __u8 seg;
+ __u8 bus;
+ __u8 device;
+ __u8 function;
+ __u16 reg;
+ } pci_cfg;
+ struct {
+ __u8 seg;
+ __u8 bus;
+ __u8 device;
+ __u8 function;
+ __u8 bar;
+ __u8 addr_type;
+#define PECI_ENDPTCFG_ADDR_TYPE_PCI 0x04
+#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D 0x05
+#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q 0x06
+
+ __u64 offset;
+ } mmio;
+ } params;
+ __u8 rx_len;
+ __u8 cc;
+ __u8 padding[2];
+ __u8 data[8];
+ __u8 domain_id;
+ __u8 padding1[3];
+} __attribute__((__packed__));
+
+struct peci_wr_end_pt_cfg_msg {
+#define PECI_WRENDPTCFG_PCI_WRITE_LEN_BASE 13
+#define PECI_WRENDPTCFG_MMIO_D_WRITE_LEN_BASE 15
+#define PECI_WRENDPTCFG_MMIO_Q_WRITE_LEN_BASE 19
+#define PECI_WRENDPTCFG_READ_LEN 1
+#define PECI_WRENDPTCFG_CMD 0xc5
+
+ __u8 addr;
+ __u8 msg_type;
+ /* See msg_type in struct peci_rd_end_pt_cfg_msg */
+
+ union {
+ struct {
+ __u8 seg;
+ __u8 bus;
+ __u8 device;
+ __u8 function;
+ __u16 reg;
+ } pci_cfg;
+ struct {
+ __u8 seg;
+ __u8 bus;
+ __u8 device;
+ __u8 function;
+ __u8 bar;
+ __u8 addr_type;
+ /* See addr_type in struct peci_rd_end_pt_cfg_msg */
+
+ __u64 offset;
+ } mmio;
+ } params;
+ __u8 tx_len;
+ __u8 cc;
+ __u8 padding[2];
+ __u64 value;
+ __u8 domain_id;
+ __u8 padding1[3];
+} __attribute__((__packed__));
+
+/* Crashdump Agent */
+#define PECI_CRASHDUMP_CORE 0x00
+#define PECI_CRASHDUMP_TOR 0x01
+
+/* Crashdump Agent Param */
+#define PECI_CRASHDUMP_PAYLOAD_SIZE 0x00
+
+/* Crashdump Agent Data Param */
+#define PECI_CRASHDUMP_AGENT_ID 0x00
+#define PECI_CRASHDUMP_AGENT_PARAM 0x01
+
+struct peci_crashdump_disc_msg {
+ __u8 addr;
+ __u8 subopcode;
+#define PECI_CRASHDUMP_ENABLED 0x00
+#define PECI_CRASHDUMP_NUM_AGENTS 0x01
+#define PECI_CRASHDUMP_AGENT_DATA 0x02
+
+ __u8 cc;
+ __u8 param0;
+ __u16 param1;
+ __u8 param2;
+ __u8 rx_len;
+ __u8 data[8];
+ __u8 domain_id;
+ __u8 padding[3];
+} __attribute__((__packed__));
+
+struct peci_crashdump_get_frame_msg {
+#define PECI_CRASHDUMP_DISC_WRITE_LEN 9
+#define PECI_CRASHDUMP_DISC_READ_LEN_BASE 1
+#define PECI_CRASHDUMP_DISC_VERSION 0
+#define PECI_CRASHDUMP_DISC_OPCODE 1
+#define PECI_CRASHDUMP_GET_FRAME_WRITE_LEN 10
+#define PECI_CRASHDUMP_GET_FRAME_READ_LEN_BASE 1
+#define PECI_CRASHDUMP_GET_FRAME_VERSION 0
+#define PECI_CRASHDUMP_GET_FRAME_OPCODE 3
+#define PECI_CRASHDUMP_CMD 0x71
+
+ __u8 addr;
+ __u8 padding0;
+ __u16 param0;
+ __u16 param1;
+ __u16 param2;
+ __u8 rx_len;
+ __u8 cc;
+ __u8 padding1[2];
+ __u8 data[16];
+ __u8 domain_id;
+ __u8 padding2[3];
+} __attribute__((__packed__));
+
+#define PECI_IOC_BASE 0xb8
+
+#define PECI_IOC_XFER \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_XFER, struct peci_xfer_msg)
+
+#define PECI_IOC_PING \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_PING, struct peci_ping_msg)
+
+#define PECI_IOC_GET_DIB \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_GET_DIB, struct peci_get_dib_msg)
+
+#define PECI_IOC_GET_TEMP \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_GET_TEMP, struct peci_get_temp_msg)
+
+#define PECI_IOC_RD_PKG_CFG \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_RD_PKG_CFG, struct peci_rd_pkg_cfg_msg)
+
+#define PECI_IOC_WR_PKG_CFG \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_WR_PKG_CFG, struct peci_wr_pkg_cfg_msg)
+
+#define PECI_IOC_RD_IA_MSR \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_RD_IA_MSR, struct peci_rd_ia_msr_msg)
+
+#define PECI_IOC_WR_IA_MSR \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_WR_IA_MSR, struct peci_wr_ia_msr_msg)
+
+#define PECI_IOC_RD_IA_MSREX \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_RD_IA_MSREX, struct peci_rd_ia_msrex_msg)
+
+#define PECI_IOC_RD_PCI_CFG \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_RD_PCI_CFG, struct peci_rd_pci_cfg_msg)
+
+#define PECI_IOC_WR_PCI_CFG \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_WR_PCI_CFG, struct peci_wr_pci_cfg_msg)
+
+#define PECI_IOC_RD_PCI_CFG_LOCAL \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_RD_PCI_CFG_LOCAL, \
+ struct peci_rd_pci_cfg_local_msg)
+
+#define PECI_IOC_WR_PCI_CFG_LOCAL \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_WR_PCI_CFG_LOCAL, \
+ struct peci_wr_pci_cfg_local_msg)
+
+#define PECI_IOC_RD_END_PT_CFG \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_RD_END_PT_CFG, \
+ struct peci_rd_end_pt_cfg_msg)
+
+#define PECI_IOC_WR_END_PT_CFG \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_WR_END_PT_CFG, \
+ struct peci_wr_end_pt_cfg_msg)
+
+#define PECI_IOC_CRASHDUMP_DISC \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_CRASHDUMP_DISC, \
+ struct peci_crashdump_disc_msg)
+
+#define PECI_IOC_CRASHDUMP_GET_FRAME \
+ _IOWR(PECI_IOC_BASE, PECI_CMD_CRASHDUMP_GET_FRAME, \
+ struct peci_crashdump_get_frame_msg)
+
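+/*
+ * Illustrative end-to-end sketch (the device node name /dev/peci-0 is an
+ * assumption, not something defined by this header):
+ *
+ *	#include <fcntl.h>
+ *	#include <stdio.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/peci-ioctl.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct peci_get_dib_msg msg = { .addr = PECI_BASE_ADDR };
+ *		int fd = open("/dev/peci-0", O_RDWR);
+ *
+ *		if (fd < 0)
+ *			return 1;
+ *		if (!ioctl(fd, PECI_IOC_GET_DIB, &msg))
+ *			printf("DIB: 0x%016llx\n", (unsigned long long)msg.dib);
+ *		return 0;
+ *	}
+ */
+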
+#endif /* __PECI_IOCTL_H */
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 7121ce2a47c0..729786f3ad7e 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -91,7 +91,6 @@ static void ncsi_channel_monitor(struct timer_list *t)
struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
struct ncsi_package *np = nc->package;
struct ncsi_dev_priv *ndp = np->ndp;
- struct ncsi_channel_mode *ncm;
struct ncsi_cmd_arg nca;
bool enabled, chained;
unsigned int monitor_state;
@@ -141,17 +140,14 @@ bad_state:
netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
nc->id);
ncsi_report_link(ndp, true);
- ndp->flags |= NCSI_DEV_RESHUFFLE;
- ncm = &nc->modes[NCSI_MODE_LINK];
spin_lock_irqsave(&nc->lock, flags);
nc->monitor.enabled = false;
- nc->state = NCSI_CHANNEL_INVISIBLE;
- ncm->data[2] &= ~0x1;
+ nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
spin_lock_irqsave(&ndp->lock, flags);
- nc->state = NCSI_CHANNEL_ACTIVE;
+ ndp->flags |= NCSI_DEV_RESHUFFLE | NCSI_DEV_RESET;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
ncsi_process_next_channel(ndp);
@@ -432,6 +428,7 @@ static void ncsi_request_timeout(struct timer_list *t)
{
struct ncsi_request *nr = from_timer(nr, t, timer);
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_cmd_pkt *cmd;
struct ncsi_package *np;
struct ncsi_channel *nc;
@@ -446,6 +443,16 @@ static void ncsi_request_timeout(struct timer_list *t)
spin_unlock_irqrestore(&ndp->lock, flags);
return;
}
+ if (nd->state == ncsi_dev_state_suspend ||
+ nd->state == ncsi_dev_state_suspend_select ||
+ nd->state == ncsi_dev_state_suspend_gls ||
+ nd->state == ncsi_dev_state_suspend_dcnt ||
+ nd->state == ncsi_dev_state_suspend_dc ||
+ nd->state == ncsi_dev_state_suspend_deselect ||
+ nd->state == ncsi_dev_state_suspend_done) {
+ ndp->flags |= NCSI_DEV_RESET;
+ nd->state = ncsi_dev_state_suspend_done;
+ }
spin_unlock_irqrestore(&ndp->lock, flags);
if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {