author  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-04 06:55:59 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-04 06:55:59 +0300
commit  f4dd029ee0b92b77769a1ac6dce03e829e74763e (patch)
tree  f918cc855a8171d68746ab61a32b9e61b024845b
parent  974668417b74ec5f68df2411f53b3d3812565059 (diff)
parent  cbbdc6082917a92da0fc07cee255111de16ed64a (diff)
Merge tag 'char-misc-4.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc updates from Greg KH:
 "Here is the "big" char/misc driver patchset for 4.13-rc1.

  Lots of stuff in here, a large thunderbolt update, w1 driver header
  reorg, the new mux driver subsystem, google firmware driver updates,
  and a raft of other smaller things. Full details in the shortlog.

  All of these have been in linux-next for a while with the only
  reported issue being a merge problem with this tree and the jc-docs
  tree in the w1 documentation area"

* tag 'char-misc-4.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (147 commits)
  misc: apds990x: Use sysfs_match_string() helper
  mei: drop unreachable code in mei_start
  mei: validate the message header only in first fragment.
  DocBook: w1: Update W1 file locations and names in DocBook
  mux: adg792a: always require I2C support
  nvmem: rockchip-efuse: add support for rk322x-efuse
  nvmem: core: add locking to nvmem_find_cell
  nvmem: core: Call put_device() in nvmem_unregister()
  nvmem: core: fix leaks on registration errors
  nvmem: correct Broadcom OTP controller driver writes
  w1: Add subsystem kernel public interface
  drivers/fsi: Add module license to core driver
  drivers/fsi: Use asynchronous slave mode
  drivers/fsi: Add hub master support
  drivers/fsi: Add SCOM FSI client device driver
  drivers/fsi/gpio: Add tracepoints for GPIO master
  drivers/fsi: Add GPIO based FSI master
  drivers/fsi: Document FSI master sysfs files in ABI
  drivers/fsi: Add error handling for slave
  drivers/fsi: Add tracepoints for low-level operations
  ...
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-fsi  38
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-thunderbolt  110
-rw-r--r--  Documentation/ABI/testing/sysfs-class-mux  16
-rw-r--r--  Documentation/DocBook/w1.tmpl  18
-rw-r--r--  Documentation/admin-guide/devices.txt  4
-rw-r--r--  Documentation/admin-guide/index.rst  1
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  7
-rw-r--r--  Documentation/admin-guide/thunderbolt.rst  199
-rw-r--r--  Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt  49
-rw-r--r--  Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt  24
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt  99
-rw-r--r--  Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt  39
-rw-r--r--  Documentation/devicetree/bindings/mux/adi,adg792a.txt  75
-rw-r--r--  Documentation/devicetree/bindings/mux/gpio-mux.txt  69
-rw-r--r--  Documentation/devicetree/bindings/mux/mmio-mux.txt  60
-rw-r--r--  Documentation/devicetree/bindings/mux/mux-controller.txt  157
-rw-r--r--  Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt  1
-rw-r--r--  Documentation/driver-model/devres.txt  7
-rw-r--r--  Documentation/trace/coresight-cpu-debug.txt  175
-rw-r--r--  MAINTAINERS  22
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220.dtsi  64
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8916.dtsi  32
-rw-r--r--  arch/x86/include/asm/mshyperv.h  1
-rw-r--r--  drivers/Kconfig  2
-rw-r--r--  drivers/Makefile  1
-rw-r--r--  drivers/auxdisplay/panel.c  5
-rw-r--r--  drivers/firmware/google/memconsole-coreboot.c  54
-rw-r--r--  drivers/firmware/google/memconsole-x86-legacy.c  18
-rw-r--r--  drivers/firmware/google/memconsole.c  14
-rw-r--r--  drivers/firmware/google/memconsole.h  7
-rw-r--r--  drivers/firmware/google/vpd.c  39
-rw-r--r--  drivers/fsi/Kconfig  26
-rw-r--r--  drivers/fsi/Makefile  3
-rw-r--r--  drivers/fsi/fsi-core.c  841
-rw-r--r--  drivers/fsi/fsi-master-gpio.c  604
-rw-r--r--  drivers/fsi/fsi-master-hub.c  327
-rw-r--r--  drivers/fsi/fsi-master.h  43
-rw-r--r--  drivers/fsi/fsi-scom.c  263
-rw-r--r--  drivers/hv/channel.c  8
-rw-r--r--  drivers/hv/channel_mgmt.c  69
-rw-r--r--  drivers/hv/connection.c  11
-rw-r--r--  drivers/hv/hv.c  9
-rw-r--r--  drivers/hv/hv_kvp.c  14
-rw-r--r--  drivers/hv/hv_util.c  164
-rw-r--r--  drivers/hv/hyperv_vmbus.h  11
-rw-r--r--  drivers/hv/vmbus_drv.c  80
-rw-r--r--  drivers/hwtracing/coresight/Kconfig  14
-rw-r--r--  drivers/hwtracing/coresight/Makefile  1
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c  700
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c  7
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c  3
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c  25
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c  7
-rw-r--r--  drivers/hwtracing/coresight/coresight.c  34
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c  47
-rw-r--r--  drivers/i2c/muxes/Kconfig  13
-rw-r--r--  drivers/i2c/muxes/Makefile  1
-rw-r--r--  drivers/i2c/muxes/i2c-mux-gpmux.c  173
-rw-r--r--  drivers/iio/Kconfig  1
-rw-r--r--  drivers/iio/Makefile  1
-rw-r--r--  drivers/iio/inkern.c  60
-rw-r--r--  drivers/iio/multiplexer/Kconfig  18
-rw-r--r--  drivers/iio/multiplexer/Makefile  6
-rw-r--r--  drivers/iio/multiplexer/iio-mux.c  459
-rw-r--r--  drivers/ipack/ipack.c  3
-rw-r--r--  drivers/memory/ti-aemif.c  5
-rw-r--r--  drivers/misc/Kconfig  8
-rw-r--r--  drivers/misc/Makefile  1
-rw-r--r--  drivers/misc/apds990x.c  16
-rw-r--r--  drivers/misc/aspeed-lpc-snoop.c  261
-rw-r--r--  drivers/misc/bh1770glc.c  2
-rw-r--r--  drivers/misc/mei/bus.c  2
-rw-r--r--  drivers/misc/mei/hw.h  2
-rw-r--r--  drivers/misc/mei/init.c  6
-rw-r--r--  drivers/misc/mei/interrupt.c  26
-rw-r--r--  drivers/misc/mei/mei_dev.h  1
-rw-r--r--  drivers/misc/sram-exec.c  27
-rw-r--r--  drivers/mux/Kconfig  59
-rw-r--r--  drivers/mux/Makefile  8
-rw-r--r--  drivers/mux/mux-adg792a.c  157
-rw-r--r--  drivers/mux/mux-core.c  547
-rw-r--r--  drivers/mux/mux-gpio.c  114
-rw-r--r--  drivers/mux/mux-mmio.c  141
-rw-r--r--  drivers/nvmem/bcm-ocotp.c  4
-rw-r--r--  drivers/nvmem/core.c  22
-rw-r--r--  drivers/nvmem/rockchip-efuse.c  4
-rw-r--r--  drivers/platform/goldfish/goldfish_pipe.c  2
-rw-r--r--  drivers/power/supply/ds2760_battery.c  2
-rw-r--r--  drivers/power/supply/ds2780_battery.c  2
-rw-r--r--  drivers/power/supply/ds2781_battery.c  2
-rw-r--r--  drivers/pps/Kconfig  12
-rw-r--r--  drivers/pps/clients/Kconfig  6
-rw-r--r--  drivers/pps/generators/Kconfig  3
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c  608
-rw-r--r--  drivers/thunderbolt/Kconfig  13
-rw-r--r--  drivers/thunderbolt/Makefile  2
-rw-r--r--  drivers/thunderbolt/cap.c  169
-rw-r--r--  drivers/thunderbolt/ctl.c  665
-rw-r--r--  drivers/thunderbolt/ctl.h  105
-rw-r--r--  drivers/thunderbolt/dma_port.c  524
-rw-r--r--  drivers/thunderbolt/dma_port.h  34
-rw-r--r--  drivers/thunderbolt/domain.c  456
-rw-r--r--  drivers/thunderbolt/eeprom.c  121
-rw-r--r--  drivers/thunderbolt/icm.c  1089
-rw-r--r--  drivers/thunderbolt/nhi.c  306
-rw-r--r--  drivers/thunderbolt/nhi.h  93
-rw-r--r--  drivers/thunderbolt/nhi_regs.h  27
-rw-r--r--  drivers/thunderbolt/switch.c  1178
-rw-r--r--  drivers/thunderbolt/tb.c  240
-rw-r--r--  drivers/thunderbolt/tb.h  251
-rw-r--r--  drivers/thunderbolt/tb_msgs.h  260
-rw-r--r--  drivers/thunderbolt/tb_regs.h  50
-rw-r--r--  drivers/thunderbolt/tunnel_pci.c  17
-rw-r--r--  drivers/uio/uio_pci_generic.c  20
-rw-r--r--  drivers/vhost/vsock.c  4
-rw-r--r--  drivers/w1/masters/ds1wm.c  3
-rw-r--r--  drivers/w1/masters/ds2482.c  48
-rw-r--r--  drivers/w1/masters/ds2490.c  36
-rw-r--r--  drivers/w1/masters/matrox_w1.c  43
-rw-r--r--  drivers/w1/masters/mxc_w1.c  3
-rw-r--r--  drivers/w1/masters/omap_hdq.c  60
-rw-r--r--  drivers/w1/masters/w1-gpio.c  3
-rw-r--r--  drivers/w1/slaves/w1_bq27000.c  15
-rw-r--r--  drivers/w1/slaves/w1_ds2405.c  5
-rw-r--r--  drivers/w1/slaves/w1_ds2406.c  12
-rw-r--r--  drivers/w1/slaves/w1_ds2408.c  15
-rw-r--r--  drivers/w1/slaves/w1_ds2413.c  14
-rw-r--r--  drivers/w1/slaves/w1_ds2423.c  8
-rw-r--r--  drivers/w1/slaves/w1_ds2431.c  8
-rw-r--r--  drivers/w1/slaves/w1_ds2433.c  14
-rw-r--r--  drivers/w1/slaves/w1_ds2438.c  5
-rw-r--r--  drivers/w1/slaves/w1_ds2760.c  18
-rw-r--r--  drivers/w1/slaves/w1_ds2780.c  9
-rw-r--r--  drivers/w1/slaves/w1_ds2781.c  9
-rw-r--r--  drivers/w1/slaves/w1_ds28e04.c  14
-rw-r--r--  drivers/w1/slaves/w1_smem.c  17
-rw-r--r--  drivers/w1/slaves/w1_therm.c  26
-rw-r--r--  drivers/w1/w1.c  22
-rw-r--r--  drivers/w1/w1_family.c  8
-rw-r--r--  drivers/w1/w1_family.h  98
-rw-r--r--  drivers/w1/w1_int.c  6
-rw-r--r--  drivers/w1/w1_int.h  27
-rw-r--r--  drivers/w1/w1_internal.h  87
-rw-r--r--  drivers/w1/w1_io.c  2
-rw-r--r--  drivers/w1/w1_netlink.c  2
-rw-r--r--  drivers/w1/w1_netlink.h  2
-rw-r--r--  include/dt-bindings/mux/mux.h  16
-rw-r--r--  include/linux/coresight.h  10
-rw-r--r--  include/linux/crc4.h  8
-rw-r--r--  include/linux/fsi.h  35
-rw-r--r--  include/linux/iio/consumer.h  37
-rw-r--r--  include/linux/miscdevice.h  1
-rw-r--r--  include/linux/mux/consumer.h  32
-rw-r--r--  include/linux/mux/driver.h  108
-rw-r--r--  include/linux/platform_data/apds990x.h (renamed from include/linux/i2c/apds990x.h)  0
-rw-r--r--  include/linux/platform_data/bh1770glc.h (renamed from include/linux/i2c/bh1770glc.h)  0
-rw-r--r--  include/linux/sram.h  8
-rw-r--r--  include/linux/w1.h (renamed from drivers/w1/w1.h)  114
-rw-r--r--  include/trace/events/fsi.h  127
-rw-r--r--  include/trace/events/fsi_master_gpio.h  68
-rw-r--r--  kernel/configs/android-base.config  11
-rw-r--r--  kernel/configs/android-recommended.config  5
-rw-r--r--  lib/Kconfig  8
-rw-r--r--  lib/Makefile  1
-rw-r--r--  lib/crc4.c  46
-rw-r--r--  tools/hv/hv_kvp_daemon.c  44
-rw-r--r--  tools/hv/hv_vss_daemon.c  4
167 files changed, 12819 insertions, 1630 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-fsi b/Documentation/ABI/testing/sysfs-bus-fsi
new file mode 100644
index 000000000000..57c806350d6c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fsi
@@ -0,0 +1,38 @@
+What: /sys/bus/platform/devices/fsi-master/rescan
+Date: May 2017
+KernelVersion: 4.12
+Contact: cbostic@linux.vnet.ibm.com
+Description:
+ Initiates an FSI master scan for all connected slave devices
+ on its links.
+
+What: /sys/bus/platform/devices/fsi-master/break
+Date: May 2017
+KernelVersion: 4.12
+Contact: cbostic@linux.vnet.ibm.com
+Description:
+ Sends an FSI BREAK command on a master's communication
+ link to any connected slaves. A BREAK resets the connected
+ devices' logic and prepares them to receive further commands
+ from the master.
+
+What: /sys/bus/platform/devices/fsi-master/slave@00:00/term
+Date: May 2017
+KernelVersion: 4.12
+Contact: cbostic@linux.vnet.ibm.com
+Description:
+ Sends an FSI terminate command from the master to its
+ connected slave. A terminate resets the slave's state machines
+ that control access to the internally connected engines. In
+ addition, the slave freezes its internal error register for
+ debugging purposes. This command is also needed to abort any
+ ongoing operation in case of an expired 'Master Time Out'
+ timer.
+
+What: /sys/bus/platform/devices/fsi-master/slave@00:00/raw
+Date: May 2017
+KernelVersion: 4.12
+Contact: cbostic@linux.vnet.ibm.com
+Description:
+ Provides a means of reading/writing a 32-bit value from/to a
+ specified FSI bus address.
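+
+ As a hedged illustration (not part of the ABI): a minimal
+ userspace sketch that reads one word through this attribute,
+ assuming, as the description implies but is not verified here,
+ that the file offset maps directly to the FSI bus address; the
+ sysfs path below is hypothetical and platform dependent:
+
+     #include <fcntl.h>
+     #include <stdint.h>
+     #include <stdio.h>
+     #include <unistd.h>
+
+     int main(void)
+     {
+         /* hypothetical slave path */
+         const char *path =
+             "/sys/bus/platform/devices/fsi-master/slave@00:00/raw";
+         uint32_t val;
+         int fd = open(path, O_RDONLY);
+
+         if (fd < 0) {
+             perror("open");
+             return 1;
+         }
+         /* read the 32-bit word at FSI bus address 0x0 */
+         if (pread(fd, &val, sizeof(val), 0) != sizeof(val)) {
+             perror("pread");
+             close(fd);
+             return 1;
+         }
+         printf("0x%08x\n", val);
+         close(fd);
+         return 0;
+     }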
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
new file mode 100644
index 000000000000..2a98149943ea
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -0,0 +1,110 @@
+What: /sys/bus/thunderbolt/devices/.../domainX/security
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: This attribute holds the current Thunderbolt security level
+ set by the system BIOS. Possible values are:
+
+ none: All devices are automatically authorized
+ user: Devices are only authorized based on writing
+ appropriate value to the authorized attribute
+ secure: Require devices that support secure connect at
+ minimum. User needs to authorize each device.
+ dponly: Automatically tunnel Display port (and USB). No
+ PCIe tunnels are created.
+
+What: /sys/bus/thunderbolt/devices/.../authorized
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: This attribute is used to authorize Thunderbolt devices
+ after they have been connected. If the device is not
+ authorized, no PCIe or DisplayPort tunnels are created and
+ the device is not available to the system.
+
+ Contents of this attribute will be 0 when the device is not
+ yet authorized.
+
+ The following values are supported:
+ 1: The device will be authorized and connected
+
+ When the key attribute contains a 32 byte hex string, the
+ possible values are:
+ 1: The 32 byte hex string is added to the device NVM and
+ the device is authorized.
+ 2: Send a challenge based on the 32 byte hex string. If the
+ challenge response from the device is valid, the device is
+ authorized. In case of failure, errno will be ENOKEY if
+ the device did not contain a key at all, and
+ EKEYREJECTED if the challenge response did not match.
+
+What: /sys/bus/thunderbolt/devices/.../key
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: When a device supports Thunderbolt secure connect it will
+ have this attribute. Writing a 32 byte hex string changes
+ authorization to use the secure connection method instead.
+
+What: /sys/bus/thunderbolt/devices/.../device
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: This attribute contains the ID of this device extracted from
+ the device DROM.
+
+What: /sys/bus/thunderbolt/devices/.../device_name
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: This attribute contains the name of this device extracted from
+ the device DROM.
+
+What: /sys/bus/thunderbolt/devices/.../vendor
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: This attribute contains the vendor ID of this device extracted
+ from the device DROM.
+
+What: /sys/bus/thunderbolt/devices/.../vendor_name
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: This attribute contains the vendor name of this device extracted
+ from the device DROM.
+
+What: /sys/bus/thunderbolt/devices/.../unique_id
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: This attribute contains the unique_id string of this device.
+ This is either read from hardware registers (UUID on
+ newer hardware) or based on the UID from the device DROM.
+ It can be used to uniquely identify a particular device.
+
+What: /sys/bus/thunderbolt/devices/.../nvm_version
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: If the device has upgradeable firmware, the version
+ number is available here. Format: %x.%x, major.minor.
+ If the device is in safe mode, reading the file returns
+ -ENODATA instead, as the NVM version is not available.
+
+What: /sys/bus/thunderbolt/devices/.../nvm_authenticate
+Date: Sep 2017
+KernelVersion: 4.13
+Contact: thunderbolt-software@lists.01.org
+Description: When a new NVM image is written to the non-active NVM
+ area (through the non_activeX NVMem device), the
+ authentication procedure is started by writing 1 to
+ this file. If everything goes well, the device is
+ restarted with the new NVM firmware. If the image
+ verification fails, an error code is returned instead.
+
+ When read, this file holds the status of the last
+ authentication operation if an error occurred during the
+ process. This is directly the status value from the DMA
+ configuration based mailbox before the device is power
+ cycled. Writing 0 here clears the status.
diff --git a/Documentation/ABI/testing/sysfs-class-mux b/Documentation/ABI/testing/sysfs-class-mux
new file mode 100644
index 000000000000..8715f9c7bd4f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-mux
@@ -0,0 +1,16 @@
+What: /sys/class/mux/
+Date: April 2017
+KernelVersion: 4.13
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ The mux/ class sub-directory belongs to the Generic MUX
+ Framework and provides a sysfs interface for using MUX
+ controllers.
+
+What: /sys/class/mux/muxchipN/
+Date: April 2017
+KernelVersion: 4.13
+Contact: Peter Rosin <peda@axentia.se>
+Description:
+ A /sys/class/mux/muxchipN directory is created for each
+ probed MUX chip where N is a simple enumeration.
diff --git a/Documentation/DocBook/w1.tmpl b/Documentation/DocBook/w1.tmpl
index b0228d4c81bb..c65cb27abef9 100644
--- a/Documentation/DocBook/w1.tmpl
+++ b/Documentation/DocBook/w1.tmpl
@@ -51,9 +51,9 @@
<sect1 id="w1_internal_api">
<title>W1 API internal to the kernel</title>
<sect2 id="w1.h">
- <title>drivers/w1/w1.h</title>
- <para>W1 core functions.</para>
-!Idrivers/w1/w1.h
+ <title>include/linux/w1.h</title>
+ <para>W1 kernel API functions.</para>
+!Iinclude/linux/w1.h
</sect2>
<sect2 id="w1.c">
@@ -62,18 +62,18 @@
!Idrivers/w1/w1.c
</sect2>
- <sect2 id="w1_family.h">
- <title>drivers/w1/w1_family.h</title>
- <para>Allows registering device family operations.</para>
-!Idrivers/w1/w1_family.h
- </sect2>
-
<sect2 id="w1_family.c">
<title>drivers/w1/w1_family.c</title>
<para>Allows registering device family operations.</para>
!Edrivers/w1/w1_family.c
</sect2>
+ <sect2 id="w1_internal.h">
+ <title>drivers/w1/w1_internal.h</title>
+ <para>W1 internal initialization for master devices.</para>
+!Idrivers/w1/w1_internal.h
+ </sect2>
+
<sect2 id="w1_int.c">
<title>drivers/w1/w1_int.c</title>
<para>W1 internal initialization for master devices.</para>
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index c9cea2e39c21..6b71852dadc2 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -369,8 +369,10 @@
237 = /dev/loop-control Loopback control device
238 = /dev/vhost-net Host kernel accelerator for virtio net
239 = /dev/uhid User-space I/O driver support for HID subsystem
+ 240 = /dev/userio Serio driver testing device
+ 241 = /dev/vhost-vsock Host kernel driver for virtio vsock
- 240-254 Reserved for local use
+ 242-254 Reserved for local use
255 Reserved for MISC_DYNAMIC_MINOR
11 char Raw keyboard device (Linux/SPARC only)
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 8c60a8a32a1a..6d99a7ce6e21 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -61,6 +61,7 @@ configure specific aspects of kernel behavior to your liking.
java
ras
pm/index
+ thunderbolt
.. only:: subproject and html
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index d4c3b6c6ef92..783010e95f51 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -649,6 +649,13 @@
/proc/<pid>/coredump_filter.
See also Documentation/filesystems/proc.txt.
+ coresight_cpu_debug.enable
+ [ARM,ARM64]
+ Format: <bool>
+ Enable/disable the CPU sampling based debugging.
+ 0: default value, disable debugging
+ 1: enable debugging at boot time
+
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
new file mode 100644
index 000000000000..6a4cd1f159ca
--- /dev/null
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -0,0 +1,199 @@
+=============
+ Thunderbolt
+=============
+The interface presented here is not meant for end users. Instead, there
+should be a userspace tool that handles all the low-level details, keeps a
+database of the authorized devices and prompts the user for new connections.
+
+More details about the sysfs interface for Thunderbolt devices can be
+found in ``Documentation/ABI/testing/sysfs-bus-thunderbolt``.
+
+Those users who just want to connect any device without any sort of
+manual work can add the following line to
+``/etc/udev/rules.d/99-local.rules``::
+
+ ACTION=="add", SUBSYSTEM=="thunderbolt", ATTR{authorized}=="0", ATTR{authorized}="1"
+
+This will authorize all devices automatically when they appear. However,
+keep in mind that this bypasses the security levels and makes the system
+vulnerable to DMA attacks.
+
+Security levels and how to use them
+-----------------------------------
+Starting with the Intel Falcon Ridge Thunderbolt controller, there are
+4 security levels available. The reason for these is that the connected
+devices can be DMA masters and thus read the contents of host memory
+without the CPU and OS knowing about it. There are ways to prevent this
+by setting up an IOMMU, but it is not always available for various
+reasons.
+
+The security levels are as follows:
+
+ none
+ All devices are automatically connected by the firmware. No user
+ approval is needed. In BIOS settings this is typically called
+ *Legacy mode*.
+
+ user
+ The user is asked whether the device is allowed to be connected,
+ and can make the decision based on the device identification
+ information available through ``/sys/bus/thunderbolt/devices``.
+ In BIOS settings this is typically called *Unique ID*.
+
+ secure
+ The user is asked whether the device is allowed to be connected. In
+ addition to the UUID, the device (if it supports secure connect) is
+ sent a challenge that should match the expected one based on a random
+ key written to the ``key`` sysfs attribute. In BIOS settings this is
+ typically called *One time saved key*.
+
+ dponly
+ The firmware automatically creates tunnels for Display Port and
+ USB. No PCIe tunneling is done. In BIOS settings this is
+ typically called *Display Port Only*.
+
+The current security level can be read from
+``/sys/bus/thunderbolt/devices/domainX/security`` where ``domainX`` is
+the Thunderbolt domain the host controller manages. There is typically
+one domain per Thunderbolt host controller.
+
+If the security level reads as ``user`` or ``secure``, the connected
+device must be authorized by the user before PCIe tunnels are created
+(i.e. before the PCIe device appears).
+
+Each Thunderbolt device plugged in will appear in sysfs under
+``/sys/bus/thunderbolt/devices``. The device directory carries
+information that can be used to identify the particular device,
+including its name and UUID.
+
+Authorizing devices when security level is ``user`` or ``secure``
+-----------------------------------------------------------------
+When a device is plugged in it will appear in sysfs as follows::
+
+ /sys/bus/thunderbolt/devices/0-1/authorized - 0
+ /sys/bus/thunderbolt/devices/0-1/device - 0x8004
+ /sys/bus/thunderbolt/devices/0-1/device_name - Thunderbolt to FireWire Adapter
+ /sys/bus/thunderbolt/devices/0-1/vendor - 0x1
+ /sys/bus/thunderbolt/devices/0-1/vendor_name - Apple, Inc.
+ /sys/bus/thunderbolt/devices/0-1/unique_id - e0376f00-0300-0100-ffff-ffffffffffff
+
+The ``authorized`` attribute reads 0 which means no PCIe tunnels are
+created yet. The user can authorize the device simply by running::
+
+ # echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
+
+This will create the PCIe tunnels and the device is now connected.
+
+If the device supports secure connect, and the domain security level is
+set to ``secure``, it has an additional attribute ``key`` which can hold
+a random 32 byte value used for authorization and challenging the device in
+future connects::
+
+ /sys/bus/thunderbolt/devices/0-3/authorized - 0
+ /sys/bus/thunderbolt/devices/0-3/device - 0x305
+ /sys/bus/thunderbolt/devices/0-3/device_name - AKiTiO Thunder3 PCIe Box
+ /sys/bus/thunderbolt/devices/0-3/key -
+ /sys/bus/thunderbolt/devices/0-3/vendor - 0x41
+ /sys/bus/thunderbolt/devices/0-3/vendor_name - inXtron
+ /sys/bus/thunderbolt/devices/0-3/unique_id - dc010000-0000-8508-a22d-32ca6421cb16
+
+Notice the key is empty by default.
+
+If the user does not want to use secure connect, they can just ``echo 1``
+to the ``authorized`` attribute and the PCIe tunnels will be created in
+the same way as in the ``user`` security level.
+
+If the user wants to use secure connect, the first time the device is
+plugged in a key needs to be created and sent to the device::
+
+ # key=$(openssl rand -hex 32)
+ # echo $key > /sys/bus/thunderbolt/devices/0-3/key
+ # echo 1 > /sys/bus/thunderbolt/devices/0-3/authorized
+
+Now the device is connected (PCIe tunnels are created) and in addition
+the key is stored on the device NVM.
+
+Next time the device is plugged in the user can verify (challenge) the
+device using the same key::
+
+ # echo $key > /sys/bus/thunderbolt/devices/0-3/key
+ # echo 2 > /sys/bus/thunderbolt/devices/0-3/authorized
+
+If the challenge the device returns matches the one we expect based on
+the key, the device is connected and the PCIe tunnels are created.
+However, if the challenge fails, no tunnels are created and an error is
+returned to the user.
+
+If the user still wants to connect the device, they can either approve
+the device without a key, or write a new key and then write 1 to the
+``authorized`` file to get the new key stored on the device NVM.
+
+Upgrading NVM on Thunderbolt device or host
+-------------------------------------------
+Since most of the functionality is handled in firmware running on the
+host controller or a device, it is important that the firmware can be
+upgraded to the latest version, in which possible bugs have been fixed.
+Typically OEMs provide this firmware from their support site.
+
+There is also a central site with links to firmware downloads for some
+machines:
+
+ `Thunderbolt Updates <https://thunderbolttechnology.net/updates>`_
+
+Before you upgrade firmware on a device or host, please make sure it is
+the suitable one. Failing to do so may leave the device (or host) in a
+state where it cannot be used properly anymore without special tools!
+
+Host NVM upgrade on Apple Macs is not supported.
+
+Once the NVM image has been downloaded, you need to plug in a
+Thunderbolt device so that the host controller appears. It does not
+matter which device is connected (unless you are upgrading NVM on a
+device - then you need to connect that particular device).
+
+Note that an OEM-specific method to power the controller up ("force
+power") may be available for your system, in which case there is no need
+to plug in a Thunderbolt device.
+
+After that we can write the firmware to the non-active parts of the NVM
+of the host or device. As an example, here is how the Intel NUC6i7KYK
+(Skull Canyon) Thunderbolt controller NVM is upgraded::
+
+ # dd if=KYK_TBT_FW_0018.bin of=/sys/bus/thunderbolt/devices/0-0/nvm_non_active0/nvmem
+
+Once the operation completes, we can trigger the NVM authentication and
+upgrade process as follows::
+
+ # echo 1 > /sys/bus/thunderbolt/devices/0-0/nvm_authenticate
+
+If no errors are returned, the host controller shortly disappears. Once
+it comes back the driver notices it and initiates a full power cycle.
+After a while the host controller appears again and this time it should
+be fully functional.
+
+We can verify that the new NVM firmware is active by running the
+following commands::
+
+ # cat /sys/bus/thunderbolt/devices/0-0/nvm_authenticate
+ 0x0
+ # cat /sys/bus/thunderbolt/devices/0-0/nvm_version
+ 18.0
+
+If ``nvm_authenticate`` contains anything other than 0x0, it is the
+error code from the last authentication cycle, which means the
+authentication of the NVM image failed.
+
+Note that the names of the NVMem devices ``nvm_activeN`` and
+``nvm_non_activeN`` depend on the order they are registered in the NVMem
+subsystem. N in the name is the identifier added by the NVMem subsystem.
+
+Upgrading NVM when host controller is in safe mode
+--------------------------------------------------
+If the existing NVM is not properly authenticated (or is missing), the
+host controller goes into safe mode, which means that the only available
+functionality is flashing a new NVM image. When in this mode, reading
+``nvm_version`` fails with ``ENODATA`` and the device identification
+information is missing.
+
+To recover from this mode, one needs to flash a valid NVM image to the
+host controller in the same way as described in the previous chapter.
diff --git a/Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt b/Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
new file mode 100644
index 000000000000..298291211ea4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
@@ -0,0 +1,49 @@
+* CoreSight CPU Debug Component:
+
+The CoreSight CPU debug component is compliant with the ARMv8 architecture
+reference manual (ARM DDI 0487A.k) Chapter 'Part H: External debug'. The
+external debug module is mainly used in two modes: self-hosted debug and
+external debug. It is accessed through an MMIO region within CoreSight and
+ultimately connects with the CPU for debugging. The debug module also
+provides a sample-based profiling extension, which can be used to sample the
+CPU program counter, secure state, exception level, etc.; usually every CPU
+has one dedicated debug module connected to it.
+
+Required properties:
+
+- compatible : should be "arm,coresight-cpu-debug"; supplemented with
+ "arm,primecell" since this driver is using the AMBA bus
+ interface.
+
+- reg : physical base address and length of the register set.
+
+- clocks : the clock associated to this component.
+
+- clock-names : the name of the clock referenced by the code. Since we are
+ using the AMBA framework, the name of the clock providing
+ the interconnect should be "apb_pclk" and the clock is
+ mandatory. The interface between the debug logic and the
+ processor core is clocked by the internal CPU clock, so it
+ is enabled with CPU clock by default.
+
+- cpu : the CPU phandle the debug module is affined to. When omitted
+ the module is considered to belong to CPU0.
+
+Optional properties:
+
+- power-domains: a phandle to the debug power domain. The "power-domains"
+ binding is used to turn on the debug logic if it has its own
+ dedicated power domain. If necessary, use "cpuidle.off=1" or
+ "nohlt" on the kernel command line, or the corresponding
+ sysfs node, to constrain idle states and ensure registers in
+ the CPU power domain are accessible.
+
+Example:
+
+ debug@f6590000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf6590000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu0>;
+ };
diff --git a/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
new file mode 100644
index 000000000000..a767259dedad
--- /dev/null
+++ b/Documentation/devicetree/bindings/fsi/fsi-master-gpio.txt
@@ -0,0 +1,24 @@
+Device-tree bindings for gpio-based FSI master driver
+-----------------------------------------------------
+
+Required properties:
+ - compatible = "fsi-master-gpio";
+ - clock-gpios = <gpio-descriptor>; : GPIO for FSI clock
+ - data-gpios = <gpio-descriptor>; : GPIO for FSI data signal
+
+Optional properties:
+ - enable-gpios = <gpio-descriptor>; : GPIO for enable signal
+ - trans-gpios = <gpio-descriptor>; : GPIO for voltage translator enable
+ - mux-gpios = <gpio-descriptor>; : GPIO for pin multiplexing with other
+ functions (e.g., external FSI masters)
+
+Examples:
+
+ fsi-master {
+ compatible = "fsi-master-gpio", "fsi-master";
+ clock-gpios = <&gpio 0>;
+ data-gpios = <&gpio 1>;
+ enable-gpios = <&gpio 2>;
+ trans-gpios = <&gpio 3>;
+ mux-gpios = <&gpio 4>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt
new file mode 100644
index 000000000000..2907dab56298
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpmux.txt
@@ -0,0 +1,99 @@
+General Purpose I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses a mux controller
+from the mux subsystem to route the I2C signals.
+
+ .-----. .-----.
+ | dev | | dev |
+ .------------. '-----' '-----'
+ | SoC | | |
+ | | .--------+--------'
+ | .------. | .------+ child bus A, on MUX value set to 0
+ | | I2C |-|--| Mux |
+ | '------' | '--+---+ child bus B, on MUX value set to 1
+ | .------. | | '----------+--------+--------.
+ | | MUX- | | | | | |
+ | | Ctrl |-|-----+ .-----. .-----. .-----.
+ | '------' | | dev | | dev | | dev |
+ '------------' '-----' '-----' '-----'
+
+Required properties:
+- compatible: i2c-mux
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+ port is connected to.
+- mux-controls: The phandle of the mux controller to use for operating the
+ mux.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory. The sub-bus number
+ is also the mux-controller state described in ../mux/mux-controller.txt
+
+Optional properties:
+- mux-locked: If present, explicitly allow unrelated I2C transactions on the
+ parent I2C adapter at these times:
+ + during setup of the multiplexer
+ + between setup of the multiplexer and the child bus I2C transaction
+ + between the child bus I2C transaction and releasing of the multiplexer
+ + during releasing of the multiplexer
+ However, I2C transactions to devices behind all I2C multiplexers connected
+ to the same parent adapter that this multiplexer is connected to are blocked
+ for the full duration of the complete multiplexed I2C transaction (i.e.
+ including the times covered by the above list).
+ If mux-locked is not present, the multiplexer is assumed to be parent-locked.
+ This means that no unrelated I2C transactions are allowed on the parent I2C
+ adapter for the complete multiplexed I2C transaction.
+ The properties of mux-locked and parent-locked multiplexers are discussed
+ in more detail in Documentation/i2c/i2c-topology.
+
+For each i2c child node, an I2C child bus will be created. They will
+be numbered based on their order in the device tree.
+
+Whenever an access is made to a device on a child bus, the value set
+in the relevant node's reg property will be set as the state in the
+mux controller.
+
+Example:
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ i2c-mux {
+ compatible = "i2c-mux";
+ mux-locked;
+ i2c-parent = <&i2c1>;
+
+ mux-controls = <&mux>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssd1307: oled@3c {
+ compatible = "solomon,ssd1307fb-i2c";
+ reg = <0x3c>;
+ pwms = <&pwm 4 3000>;
+ reset-gpios = <&gpio2 7 1>;
+ reset-active-low;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pca9555: pca9555@20 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
new file mode 100644
index 000000000000..c82794002595
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
@@ -0,0 +1,39 @@
+I/O channel multiplexer bindings
+
+If a multiplexer is used to select which hardware signal is fed to
+e.g. an ADC channel, these bindings describe that situation.
+
+Required properties:
+- compatible : "io-channel-mux"
+- io-channels : Channel node of the parent channel that has multiplexed
+ input.
+- io-channel-names : Should be "parent".
+- #address-cells = <1>;
+- #size-cells = <0>;
+- mux-controls : Mux controller node to use for operating the mux
+- channels : List of strings, labeling the mux controller states.
+
+For each non-empty string in the channels property, an io-channel will
+be created. The number of this io-channel is the same as the index into
+the list of strings in the channels property, and also matches the mux
+controller state. The mux controller state is described in
+../mux/mux-controller.txt
+
+Example:
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+
+ channels = "sync", "in", "system-regulator";
+ };
diff --git a/Documentation/devicetree/bindings/mux/adi,adg792a.txt b/Documentation/devicetree/bindings/mux/adi,adg792a.txt
new file mode 100644
index 000000000000..96b787a69f50
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/adi,adg792a.txt
@@ -0,0 +1,75 @@
+Bindings for Analog Devices ADG792A/G Triple 4:1 Multiplexers
+
+Required properties:
+- compatible : "adi,adg792a" or "adi,adg792g"
+- #mux-control-cells : <0> if parallel (the three muxes are bound together
+ with a single mux controller controlling all three muxes), or <1> if
+ not (one mux controller for each mux).
+* Standard mux-controller bindings as described in mux-controller.txt
+
+Optional properties for ADG792G:
+- gpio-controller : if present, #gpio-cells below is required.
+- #gpio-cells : should be <2>
+ - First cell is the GPO line number, i.e. 0 or 1
+ - Second cell is used to specify active high (0)
+ or active low (1)
+
+Optional properties:
+- idle-state : if present, array of states that the mux controllers will have
+ when idle. The special state MUX_IDLE_AS_IS is the default and
+ MUX_IDLE_DISCONNECT is also supported.
+
+States 0 through 3 correspond to signals A through D in the datasheet.
+
+Example:
+
+ /*
+ * Three independent mux controllers (of which one is used).
+ * Mux 0 is disconnected when idle, mux 1 idles in the previously
+ * selected state and mux 2 idles with signal B.
+ */
+ &i2c0 {
+ mux: mux-controller@50 {
+ compatible = "adi,adg792a";
+ reg = <0x50>;
+ #mux-control-cells = <1>;
+
+ idle-state = <MUX_IDLE_DISCONNECT MUX_IDLE_AS_IS 1>;
+ };
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux 2>;
+
+ channels = "sync-1", "", "out";
+ };
+
+
+ /*
+ * Three parallel muxes with one mux controller, useful e.g. if
+ * the adc is differential, thus needing two signals to be muxed
+ * simultaneously for correct operation.
+ */
+ &i2c0 {
+ pmux: mux-controller@50 {
+ compatible = "adi,adg792a";
+ reg = <0x50>;
+ #mux-control-cells = <0>;
+
+ idle-state = <1>;
+ };
+ };
+
+ diff-adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&pmux>;
+
+ channels = "sync-1", "", "out";
+ };
diff --git a/Documentation/devicetree/bindings/mux/gpio-mux.txt b/Documentation/devicetree/bindings/mux/gpio-mux.txt
new file mode 100644
index 000000000000..b8f746344d80
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/gpio-mux.txt
@@ -0,0 +1,69 @@
+GPIO-based multiplexer controller bindings
+
+Define what GPIO pins are used to control a multiplexer. Or several
+multiplexers, if the same pins control more than one multiplexer.
+
+Required properties:
+- compatible : "gpio-mux"
+- mux-gpios : list of gpios used to control the multiplexer, least
+ significant bit first.
+- #mux-control-cells : <0>
+* Standard mux-controller bindings as described in mux-controller.txt
+
+Optional properties:
+- idle-state : if present, the state the mux will have when idle. The
+ special state MUX_IDLE_AS_IS is the default.
+
+The multiplexer state is defined as the number represented by the
+multiplexer GPIO pins, where the first pin is the least significant
+bit. An active pin is a binary 1, an inactive pin is a binary 0.
+
+Example:
+
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+
+ channels = "sync-1", "in", "out", "sync-2";
+ };
+
+ i2c-mux {
+ compatible = "i2c-mux";
+ i2c-parent = <&i2c1>;
+
+ mux-controls = <&mux>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssd1307: oled@3c {
+ /* ... */
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pca9555: pca9555@20 {
+ /* ... */
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mux/mmio-mux.txt b/Documentation/devicetree/bindings/mux/mmio-mux.txt
new file mode 100644
index 000000000000..a9bfb4d8b6ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/mmio-mux.txt
@@ -0,0 +1,60 @@
+MMIO register bitfield-based multiplexer controller bindings
+
+Define register bitfields to be used to control multiplexers. The parent
+device tree node must be a syscon node to provide register access.
+
+Required properties:
+- compatible : "mmio-mux"
+- #mux-control-cells : <1>
+- mux-reg-masks : an array of register offset and pre-shifted bitfield mask
+ pairs, each describing a single mux control.
+* Standard mux-controller bindings as described in mux-controller.txt
+
+Optional properties:
+- idle-states : if present, the state the muxes will have when idle. The
+ special state MUX_IDLE_AS_IS is the default.
+
+The multiplexer state of each multiplexer is defined as the value of the
+bitfield described by the corresponding register offset and bitfield mask pair
+in the mux-reg-masks array, accessed through the parent syscon.
+
+Example:
+
+ syscon {
+ compatible = "syscon";
+
+ mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+
+ mux-reg-masks = <0x3 0x30>, /* 0: reg 0x3, bits 5:4 */
+ <0x3 0x40>; /* 1: reg 0x3, bit 6 */
+ idle-states = <MUX_IDLE_AS_IS>, <0>;
+ };
+ };
+
+ video-mux {
+ compatible = "video-mux";
+ mux-controls = <&mux 0>;
+
+ ports {
+ /* inputs 0..3 */
+ port@0 {
+ reg = <0>;
+ };
+ port@1 {
+ reg = <1>;
+ };
+ port@2 {
+ reg = <2>;
+ };
+ port@3 {
+ reg = <3>;
+ };
+
+ /* output */
+ port@4 {
+ reg = <4>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mux/mux-controller.txt b/Documentation/devicetree/bindings/mux/mux-controller.txt
new file mode 100644
index 000000000000..4f47e4bd2fa0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mux/mux-controller.txt
@@ -0,0 +1,157 @@
+Common multiplexer controller bindings
+======================================
+
+A multiplexer (or mux) controller will have one, or several, consumer devices
+that use the mux controller. Thus, a mux controller can possibly control
+several parallel multiplexers. Presumably there will be at least one
+multiplexer needed by each consumer, but a single mux controller can of course
+control several multiplexers for a single consumer.
+
+A mux controller provides a number of states to its consumers, and the state
+space is a simple zero-based enumeration. I.e. 0-1 for a 2-way multiplexer,
+0-7 for an 8-way multiplexer, etc.
+
+
+Consumers
+---------
+
+Mux controller consumers should specify a list of mux controllers that they
+want to use with a property containing a 'mux-ctrl-list':
+
+ mux-ctrl-list ::= <single-mux-ctrl> [mux-ctrl-list]
+ single-mux-ctrl ::= <mux-ctrl-phandle> [mux-ctrl-specifier]
+ mux-ctrl-phandle : phandle to mux controller node
+ mux-ctrl-specifier : array of #mux-control-cells specifying the
+ given mux controller (controller specific)
+
+Mux controller properties should be named "mux-controls". The exact meaning of
+each mux controller property must be documented in the device tree binding for
+each consumer. An optional property "mux-control-names" may contain a list of
+strings to label each of the mux controllers listed in the "mux-controls"
+property.
+
+Drivers for devices that use more than a single mux controller can use the
+"mux-control-names" property to map the name of the requested mux controller
+to an index into the list given by the "mux-controls" property.
+
+mux-ctrl-specifier typically encodes the chip-relative mux controller number.
+If the mux controller chip only provides a single mux controller, the
+mux-ctrl-specifier can typically be left out.
+
+Example:
+
+ /* One consumer of a 2-way mux controller (one GPIO-line) */
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+ mux-control-names = "adc";
+
+ channels = "sync", "in";
+ };
+
+Note that in the example above, specifying the "mux-control-names" is redundant
+because there is only one mux controller in the list. However, if the driver
+for the consumer node in fact asks for a named mux controller, that name is of
+course still required.
+
+ /*
+ * Two consumers (one for an ADC line and one for an i2c bus) of
+ * parallel 4-way multiplexers controlled by the same two GPIO-lines.
+ */
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ adc-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&adc 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+
+ channels = "sync-1", "in", "out", "sync-2";
+ };
+
+ i2c-mux {
+ compatible = "i2c-mux";
+ i2c-parent = <&i2c1>;
+
+ mux-controls = <&mux>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssd1307: oled@3c {
+ /* ... */
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pca9555: pca9555@20 {
+ /* ... */
+ };
+ };
+ };
+
+
+Mux controller nodes
+--------------------
+
+Mux controller nodes must specify the number of cells used for the
+specifier using the '#mux-control-cells' property.
+
+Optionally, mux controller nodes can also specify the state the mux should
+have when it is idle. The idle-state property is used for this. If the
+idle-state is not present, the mux controller is typically left as is when
+it is idle. For multiplexer chips that expose several mux controllers, the
+idle-state property is an array with one idle state for each mux controller.
+
+The special value (-1) may be used to indicate that the mux should be left
+as is when it is idle. This is the default, but can still be useful for
+mux controller chips with more than one mux controller, particularly when
+there is a need to "step past" a mux controller and set some other idle
+state for a mux controller with a higher index.
+
+Some mux controllers have the ability to disconnect the input/output of the
+multiplexer. Using this disconnected high-impedance state as the idle state
+is indicated with idle state (-2).
+
+These constants are available in
+
+ #include <dt-bindings/mux/mux.h>
+
+as MUX_IDLE_AS_IS (-1) and MUX_IDLE_DISCONNECT (-2).
+
+An example mux controller node looks like this (the adg792a chip is a triple
+4-way multiplexer):
+
+ mux: mux-controller@50 {
+ compatible = "adi,adg792a";
+ reg = <0x50>;
+ #mux-control-cells = <1>;
+
+ idle-state = <MUX_IDLE_DISCONNECT MUX_IDLE_AS_IS 2>;
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
index 94aeeeabadd5..194926f77194 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: Should be one of the following.
- "rockchip,rk3066a-efuse" - for RK3066a SoCs.
- "rockchip,rk3188-efuse" - for RK3188 SoCs.
+ - "rockchip,rk322x-efuse" - for RK322x SoCs.
- "rockchip,rk3288-efuse" - for RK3288 SoCs.
- "rockchip,rk3399-efuse" - for RK3399 SoCs.
- reg: Should contain the registers location and exact eFuse size
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 6a6618f34440..2d132fcea0f8 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -337,7 +337,12 @@ MEM
devm_kzalloc()
MFD
- devm_mfd_add_devices()
+ devm_mfd_add_devices()
+
+MUX
+ devm_mux_chip_alloc()
+ devm_mux_chip_register()
+ devm_mux_control_get()
PER-CPU MEM
devm_alloc_percpu()
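As a point of reference for the managed MUX interfaces just added above, here
is a minimal consumer sketch: an illustration only, assuming a driver with a
valid struct device and a single entry in its "mux-controls" DT property, not
a definitive implementation.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mux/consumer.h>

	static int demo_mux_select(struct device *dev)
	{
		struct mux_control *mux;
		int ret;

		/* managed get: the mux control is released on driver detach */
		mux = devm_mux_control_get(dev, NULL);
		if (IS_ERR(mux))
			return PTR_ERR(mux);

		/* route the multiplexer to state 1; may sleep until the mux is free */
		ret = mux_control_select(mux, 1);
		if (ret < 0)
			return ret;

		/* ... access whatever sits behind state 1 here ... */

		/* hand the mux back so other consumers can select it */
		return mux_control_deselect(mux);
	}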
diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt
new file mode 100644
index 000000000000..b3da1f90b861
--- /dev/null
+++ b/Documentation/trace/coresight-cpu-debug.txt
@@ -0,0 +1,175 @@
+ Coresight CPU Debug Module
+ ==========================
+
+ Author: Leo Yan <leo.yan@linaro.org>
+ Date: April 5th, 2017
+
+Introduction
+------------
+
+The CoreSight CPU debug module is defined in the ARMv8-a architecture
+reference manual (ARM DDI 0487A.k) Chapter 'Part H: External debug'. The CPU
+can integrate a debug module, which is mainly used in two modes: self-hosted
+debug and external debug. The external debug mode is the well-known case
+where an external debugger connects with the SoC through a JTAG port; on the
+other hand, software can rely on the self-hosted debug mode, which is the
+focus of this document.
+
+The debug module provides a sample-based profiling extension, which can be
+used to sample the CPU program counter, secure state, exception level, etc.;
+usually every CPU has one dedicated debug module connected to it. Using the
+self-hosted debug mechanism, the Linux kernel can access these registers
+from the MMIO region when a kernel panic happens. The kernel panic callback
+notifier dumps the related registers for every CPU, which helps in analyzing
+the panic.
+
+
+Implementation
+--------------
+
+- During driver registration, the driver uses two device ID registers,
+  EDDEVID and EDDEVID1, to decide whether sample-based profiling is
+  implemented. On some platforms this hardware feature is fully or partially
+  implemented; if the feature is not supported, registration will fail.
+
+- At the time this documentation was written, the debug driver mainly relies
+  on information gathered by the kernel panic callback notifier from three
+  sampling registers: EDPCSR, EDVIDSR and EDCIDSR. From EDPCSR we can get the
+  program counter; EDVIDSR holds information about the secure state, exception
+  level, bit width, etc.; EDCIDSR is the context ID value, which contains the
+  sampled value of CONTEXTIDR_EL1.
+
+- The driver supports a CPU running in either AArch64 or AArch32 mode. The
+  register naming convention is a bit different between them: AArch64 uses
+  'ED' as the register prefix (ARM DDI 0487A.k, chapter H9.1) and AArch32
+  uses 'DBG' as the prefix (ARM DDI 0487A.k, chapter G5.1). The driver
+  uniformly uses the AArch64 naming convention.
+
+- ARMv8-a (ARM DDI 0487A.k) and ARMv7-a (ARM DDI 0406C.b) define the
+  register bits differently, so the driver consolidates the two differences:
+
+ If PCSROffset=0b0000, on ARMv8-a the feature of EDPCSR is not implemented;
+ but ARMv7-a defines "PCSR samples are offset by a value that depends on the
+  instruction set state". For ARMv7-a, the driver additionally checks whether
+  the CPU runs the ARM or Thumb instruction set and calibrates the PCSR value;
+  the detailed description of the offset is in the ARMv7-a ARM (ARM DDI
+  0406C.b) chapter C11.11.34 "DBGPCSR, Program Counter Sampling Register".
+
+ If PCSROffset=0b0010, ARMv8-a defines "EDPCSR implemented, and samples have
+ no offset applied and do not sample the instruction set state in AArch32
+ state". So on ARMv8 if EDDEVID1.PCSROffset is 0b0010 and the CPU operates
+ in AArch32 state, EDPCSR is not sampled; when the CPU operates in AArch64
+  state, EDPCSR is sampled and no offset is applied.
+
+
+Clock and power domain
+----------------------
+
+Before accessing debug registers, we should ensure the clock and power domain
+have been enabled properly. In ARMv8-a ARM (ARM DDI 0487A.k) chapter 'H9.1
+Debug registers', the debug registers are spread into two domains: the debug
+domain and the CPU domain.
+
+ +---------------+
+ | |
+ | |
+ +----------+--+ |
+ dbg_clock -->| |**| |<-- cpu_clock
+ | Debug |**| CPU |
+ dbg_power_domain -->| |**| |<-- cpu_power_domain
+ +----------+--+ |
+ | |
+ | |
+ +---------------+
+
+For the debug domain, the DT bindings "clocks" and "power-domains" are used to
+specify the corresponding clock source and power supply for the debug logic.
+The driver calls the pm_runtime_{put|get} operations as needed to handle the
+debug power domain.
+
+For the CPU domain, different SoC designs have different power management
+schemes, which heavily impacts the external debug module. We can divide the
+designs into the cases below:
+
+- On systems with a sane power controller, which behaves correctly with
+  respect to the CPU power domain, the CPU power domain can be controlled
+  by the EDPRCR register from the driver. The driver first sets the bit
+  EDPRCR.COREPURQ to power up the CPU, and then sets the bit EDPRCR.CORENPDRQ
+  to emulate CPU power down. As a result, the CPU power domain is powered on
+  properly while the debug related registers are accessed;
+
+- Some designs will power down an entire cluster if all CPUs on the cluster
+ are powered down - including the parts of the debug registers that should
+ remain powered in the debug power domain. The bits in EDPRCR are not
+ respected in these cases, so these designs do not support debug over
+ power down in the way that the CoreSight / Debug designers anticipated.
+ This means that even checking EDPRSR has the potential to cause a bus hang
+ if the target register is unpowered.
+
+  In this case, accessing the debug registers while they are not powered
+  is a recipe for disaster, so we need to prevent CPU low power states at
+  boot time or when the user enables the module at run time. Please see the
+  chapter "How to use the module" for detailed usage information.
+
+
+Device Tree Bindings
+--------------------
+
+See Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt for details.
+
+
+How to use the module
+---------------------
+
+If you want to enable debugging functionality at boot time, you can add
+"coresight_cpu_debug.enable=1" to the kernel command line parameter.
+
+The driver also can work as module, so can enable the debugging when insmod
+module:
+# insmod coresight_cpu_debug.ko debug=1
+
+If debugging was not enabled at boot time or module load time, the driver
+uses the debugfs file system to provide a knob to dynamically enable or disable
+debugging:
+
+To enable it, write a '1' into /sys/kernel/debug/coresight_cpu_debug/enable:
+# echo 1 > /sys/kernel/debug/coresight_cpu_debug/enable
+
+To disable it, write a '0' into /sys/kernel/debug/coresight_cpu_debug/enable:
+# echo 0 > /sys/kernel/debug/coresight_cpu_debug/enable
+
+As explained in the chapter "Clock and power domain", if you are working on a
+platform whose idle states power off the debug logic and whose power
+controller does not handle the request from EDPRCR correctly, then you should
+first constrain CPU idle states before enabling the CPU debugging feature, to
+ensure the debug logic remains accessible.
+
+If you want to limit idle states at boot time, you can use "nohlt" or
+"cpuidle.off=1" in the kernel command line.
+
+At run time you can disable idle states with the methods below:
+
+Write a latency request to /dev/cpu_dma_latency to constrain the idle states
+of all CPUs (a latency of 0 us disables all idle states):
+# echo "whatever_latency_you_need_in_us" > /dev/cpu_dma_latency
+
+Disable a specific idle state of a specific CPU:
+# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
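+
+Note that a latency request written to /dev/cpu_dma_latency only stays in
+effect while the file is held open, so the echo above drops the request as
+soon as the shell closes the file. A minimal sketch (an illustration, not
+part of the driver) that holds a "disable all idle states" request open for
+the duration of a debug session:
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        int32_t latency_us = 0; /* 0 disables all idle states */
+        int fd = open("/dev/cpu_dma_latency", O_WRONLY);
+
+        if (fd < 0) {
+            perror("open /dev/cpu_dma_latency");
+            return 1;
+        }
+        if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us)) {
+            perror("write");
+            close(fd);
+            return 1;
+        }
+        pause(); /* the request holds until this process exits */
+        close(fd);
+        return 0;
+    }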
+
+
+Output format
+-------------
+
+Here is an example of the debugging output format:
+
+ARM external debug module:
+coresight-cpu-debug 850000.debug: CPU[0]:
+coresight-cpu-debug 850000.debug: EDPRSR: 00000001 (Power:On DLK:Unlock)
+coresight-cpu-debug 850000.debug: EDPCSR: [<ffff00000808e9bc>] handle_IPI+0x174/0x1d8
+coresight-cpu-debug 850000.debug: EDCIDSR: 00000000
+coresight-cpu-debug 850000.debug: EDVIDSR: 90000000 (State:Non-secure Mode:EL1/0 Width:64bits VMID:0)
+coresight-cpu-debug 852000.debug: CPU[1]:
+coresight-cpu-debug 852000.debug: EDPRSR: 00000001 (Power:On DLK:Unlock)
+coresight-cpu-debug 852000.debug: EDPCSR: [<ffff0000087fab34>] debug_notifier_call+0x23c/0x358
+coresight-cpu-debug 852000.debug: EDCIDSR: 00000000
+coresight-cpu-debug 852000.debug: EDVIDSR: 90000000 (State:Non-secure Mode:EL1/0 Width:64bits VMID:0)
diff --git a/MAINTAINERS b/MAINTAINERS
index d2363ec87445..ba64d98e7897 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1207,7 +1207,9 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/hwtracing/coresight/*
F: Documentation/trace/coresight.txt
+F: Documentation/trace/coresight-cpu-debug.txt
F: Documentation/devicetree/bindings/arm/coresight.txt
+F: Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
F: tools/perf/arch/arm/util/pmu.c
F: tools/perf/arch/arm/util/auxtrace.c
@@ -6489,6 +6491,13 @@ F: Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
F: Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
F: drivers/iio/adc/envelope-detector.c
+IIO MULTIPLEXER
+M: Peter Rosin <peda@axentia.se>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F:	Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
+F: drivers/iio/multiplexer/iio-mux.c
+
IIO SUBSYSTEM AND DRIVERS
M: Jonathan Cameron <jic23@kernel.org>
R: Hartmut Knaack <knaack.h@gmx.de>
@@ -8724,6 +8733,15 @@ S: Orphan
F: drivers/mmc/host/mmc_spi.c
F: include/linux/spi/mmc_spi.h
+MULTIPLEXER SUBSYSTEM
+M: Peter Rosin <peda@axentia.se>
+S: Maintained
+F:	Documentation/ABI/testing/sysfs-class-mux*
+F: Documentation/devicetree/bindings/mux/
+F: include/linux/dt-bindings/mux/
+F: include/linux/mux/
+F: drivers/mux/
+
MULTISOUND SOUND DRIVER
M: Andrew Veliath <andrewtv@usa.net>
S: Maintained
@@ -11336,6 +11354,9 @@ F: Documentation/tee.txt
THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
+M: Michael Jamet <michael.jamet@intel.com>
+M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Yehezkel Bernat <yehezkel.bernat@intel.com>
S: Maintained
F: drivers/thunderbolt/
@@ -13789,6 +13810,7 @@ M: Evgeniy Polyakov <zbr@ioremap.net>
S: Maintained
F: Documentation/w1/
F: drivers/w1/
+F: include/linux/w1.h
W83791D HARDWARE MONITORING DRIVER
M: Marc Hulsman <m.hulsman@tudelft.nl>
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 5013e4b2ea71..6b82695ce32c 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -887,5 +887,69 @@
};
};
};
+
+ debug@f6590000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf6590000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu0>;
+ };
+
+ debug@f6592000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf6592000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu1>;
+ };
+
+ debug@f6594000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf6594000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu2>;
+ };
+
+ debug@f6596000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf6596000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu3>;
+ };
+
+ debug@f65d0000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf65d0000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu4>;
+ };
+
+ debug@f65d2000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf65d2000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu5>;
+ };
+
+ debug@f65d4000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf65d4000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu6>;
+ };
+
+ debug@f65d6000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0 0xf65d6000 0 0x1000>;
+ clocks = <&sys_ctrl HI6220_DAPB_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&cpu7>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index ab3093995ded..17691abea608 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -1116,6 +1116,38 @@
};
};
+ debug@850000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0x850000 0x1000>;
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU0>;
+ };
+
+ debug@852000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0x852000 0x1000>;
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU1>;
+ };
+
+ debug@854000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0x854000 0x1000>;
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU2>;
+ };
+
+ debug@856000 {
+ compatible = "arm,coresight-cpu-debug","arm,primecell";
+ reg = <0x856000 0x1000>;
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+ cpu = <&CPU3>;
+ };
+
etm@85c000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0x85c000 0x1000>;
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index d5acc27ed1cc..2b58c8c1eeaa 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -136,7 +136,6 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
}
}
-#define hv_get_current_tick(tick) rdmsrl(HV_X64_MSR_TIME_REF_COUNT, tick)
#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
#define hv_init_timer_config(config, val) wrmsrl(config, val)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index ba2901e76769..505c676fa9c7 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -206,4 +206,6 @@ source "drivers/fsi/Kconfig"
source "drivers/tee/Kconfig"
+source "drivers/mux/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index cfabd141dba2..dfdcda00bfe3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -181,3 +181,4 @@ obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
+obj-$(CONFIG_MULTIPLEXER) += mux/
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e0c014c2356f..7a8b8fb2f572 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1345,14 +1345,11 @@ static inline void input_state_falling(struct logical_input *input)
static void panel_process_inputs(void)
{
- struct list_head *item;
struct logical_input *input;
keypressed = 0;
inputs_stable = 1;
- list_for_each(item, &logical_inputs) {
- input = list_entry(item, struct logical_input, list);
-
+ list_for_each_entry(input, &logical_inputs, list) {
switch (input->state) {
case INPUT_ST_LOW:
if ((phys_curr & input->mask) != input->value)
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index 02711114dece..52738887735c 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -26,12 +26,52 @@
/* CBMEM firmware console log descriptor. */
struct cbmem_cons {
- u32 buffer_size;
- u32 buffer_cursor;
- u8 buffer_body[0];
+ u32 size_dont_access_after_boot;
+ u32 cursor;
+ u8 body[0];
} __packed;
+#define CURSOR_MASK ((1 << 28) - 1)
+#define OVERFLOW (1 << 31)
+
static struct cbmem_cons __iomem *cbmem_console;
+static u32 cbmem_console_size;
+
+/*
+ * The cbmem_console structure is read again on every access because it may
+ * change at any time if runtime firmware logs new messages. This may rarely
+ * lead to race conditions where the firmware overwrites the beginning of the
+ * ring buffer with more lines after we have already read |cursor|. It should be
+ * rare and harmless enough that we don't spend extra effort working around it.
+ */
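+/*
+ * Buffer layout sketch: without the OVERFLOW flag, the valid log is simply
+ * body[0..cursor). With OVERFLOW set the log has wrapped, so the oldest data
+ * starts at |cursor| and the newest ends just before it; logically that is
+ * body[cursor..size) followed by body[0..cursor), which is what the seg[]
+ * array below describes.
+ */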
+static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count)
+{
+ u32 cursor = cbmem_console->cursor & CURSOR_MASK;
+ u32 flags = cbmem_console->cursor & ~CURSOR_MASK;
+ u32 size = cbmem_console_size;
+ struct seg { /* describes ring buffer segments in logical order */
+ u32 phys; /* physical offset from start of mem buffer */
+ u32 len; /* length of segment */
+ } seg[2] = { {0}, {0} };
+ size_t done = 0;
+ int i;
+
+ if (flags & OVERFLOW) {
+ if (cursor > size) /* Shouldn't really happen, but... */
+ cursor = 0;
+ seg[0] = (struct seg){.phys = cursor, .len = size - cursor};
+ seg[1] = (struct seg){.phys = 0, .len = cursor};
+ } else {
+ seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};
+ }
+
+ for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {
+ done += memory_read_from_buffer(buf + done, count - done, &pos,
+ cbmem_console->body + seg[i].phys, seg[i].len);
+ pos -= seg[i].len;
+ }
+ return done;
+}
static int memconsole_coreboot_init(phys_addr_t physaddr)
{
@@ -42,17 +82,17 @@ static int memconsole_coreboot_init(phys_addr_t physaddr)
if (!tmp_cbmc)
return -ENOMEM;
+ /* Read size only once to prevent overrun attack through /dev/mem. */
+ cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
cbmem_console = memremap(physaddr,
- tmp_cbmc->buffer_size + sizeof(*cbmem_console),
+ cbmem_console_size + sizeof(*cbmem_console),
MEMREMAP_WB);
memunmap(tmp_cbmc);
if (!cbmem_console)
return -ENOMEM;
- memconsole_setup(cbmem_console->buffer_body,
- min(cbmem_console->buffer_cursor, cbmem_console->buffer_size));
-
+ memconsole_setup(memconsole_coreboot_read);
return 0;
}
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
index 1f279ee883b9..8c1bf6dbdaa6 100644
--- a/drivers/firmware/google/memconsole-x86-legacy.c
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -48,6 +48,15 @@ struct biosmemcon_ebda {
};
} __packed;
+static char *memconsole_baseaddr;
+static size_t memconsole_length;
+
+static ssize_t memconsole_read(char *buf, loff_t pos, size_t count)
+{
+ return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
+ memconsole_length);
+}
+
static void found_v1_header(struct biosmemcon_ebda *hdr)
{
pr_info("memconsole: BIOS console v1 EBDA structure found at %p\n",
@@ -56,7 +65,9 @@ static void found_v1_header(struct biosmemcon_ebda *hdr)
hdr->v1.buffer_addr, hdr->v1.start,
hdr->v1.end, hdr->v1.num_chars);
- memconsole_setup(phys_to_virt(hdr->v1.buffer_addr), hdr->v1.num_chars);
+ memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
+ memconsole_length = hdr->v1.num_chars;
+ memconsole_setup(memconsole_read);
}
static void found_v2_header(struct biosmemcon_ebda *hdr)
@@ -67,8 +78,9 @@ static void found_v2_header(struct biosmemcon_ebda *hdr)
hdr->v2.buffer_addr, hdr->v2.start,
hdr->v2.end, hdr->v2.num_bytes);
- memconsole_setup(phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start),
- hdr->v2.end - hdr->v2.start);
+ memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start);
+ memconsole_length = hdr->v2.end - hdr->v2.start;
+ memconsole_setup(memconsole_read);
}
/*
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index 94e200ddb4fa..166f07c68c02 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -22,15 +22,15 @@
#include "memconsole.h"
-static char *memconsole_baseaddr;
-static size_t memconsole_length;
+static ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
- return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
- memconsole_length);
+ if (WARN_ON_ONCE(!memconsole_read_func))
+ return -EIO;
+ return memconsole_read_func(buf, pos, count);
}
static struct bin_attribute memconsole_bin_attr = {
@@ -38,16 +38,14 @@ static struct bin_attribute memconsole_bin_attr = {
.read = memconsole_read,
};
-void memconsole_setup(void *baseaddr, size_t length)
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
{
- memconsole_baseaddr = baseaddr;
- memconsole_length = length;
+ memconsole_read_func = read_func;
}
EXPORT_SYMBOL(memconsole_setup);
int memconsole_sysfs_init(void)
{
- memconsole_bin_attr.size = memconsole_length;
return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
}
EXPORT_SYMBOL(memconsole_sysfs_init);
diff --git a/drivers/firmware/google/memconsole.h b/drivers/firmware/google/memconsole.h
index 190fc03a51ae..ff1592dc7d1a 100644
--- a/drivers/firmware/google/memconsole.h
+++ b/drivers/firmware/google/memconsole.h
@@ -18,13 +18,14 @@
#ifndef __FIRMWARE_GOOGLE_MEMCONSOLE_H
#define __FIRMWARE_GOOGLE_MEMCONSOLE_H
+#include <linux/types.h>
+
/*
* memconsole_setup
*
- * Initialize the memory console from raw (virtual) base
- * address and length.
+ * Initialize the memory console, passing the function to handle read accesses.
*/
-void memconsole_setup(void *baseaddr, size_t length);
+void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t));
/*
* memconsole_sysfs_init
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 31058d400bda..78945729388e 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -118,14 +118,13 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- info->key = kzalloc(key_len + 1, GFP_KERNEL);
+
+ info->key = kstrndup(key, key_len, GFP_KERNEL);
if (!info->key) {
ret = -ENOMEM;
goto free_info;
}
- memcpy(info->key, key, key_len);
-
sysfs_bin_attr_init(&info->bin_attr);
info->bin_attr.attr.name = info->key;
info->bin_attr.attr.mode = 0444;
@@ -191,8 +190,7 @@ static int vpd_section_create_attribs(struct vpd_section *sec)
static int vpd_section_init(const char *name, struct vpd_section *sec,
phys_addr_t physaddr, size_t size)
{
- int ret;
- int raw_len;
+ int err;
sec->baseaddr = memremap(physaddr, size, MEMREMAP_WB);
if (!sec->baseaddr)
@@ -201,10 +199,11 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->name = name;
 	/* We want to export the raw partition with name ${name}_raw */
- raw_len = strlen(name) + 5;
- sec->raw_name = kzalloc(raw_len, GFP_KERNEL);
- strncpy(sec->raw_name, name, raw_len);
- strncat(sec->raw_name, "_raw", raw_len);
+ sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
+ if (!sec->raw_name) {
+ err = -ENOMEM;
+ goto err_iounmap;
+ }
sysfs_bin_attr_init(&sec->bin_attr);
sec->bin_attr.attr.name = sec->raw_name;
@@ -213,14 +212,14 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->bin_attr.read = vpd_section_read;
sec->bin_attr.private = sec;
- ret = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
- if (ret)
- goto free_sec;
+ err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
+ if (err)
+ goto err_free_raw_name;
sec->kobj = kobject_create_and_add(name, vpd_kobj);
if (!sec->kobj) {
- ret = -EINVAL;
- goto sysfs_remove;
+ err = -EINVAL;
+ goto err_sysfs_remove;
}
INIT_LIST_HEAD(&sec->attribs);
@@ -230,14 +229,13 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
return 0;
-sysfs_remove:
+err_sysfs_remove:
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
-
-free_sec:
+err_free_raw_name:
kfree(sec->raw_name);
+err_iounmap:
iounmap(sec->baseaddr);
-
- return ret;
+ return err;
}
static int vpd_section_destroy(struct vpd_section *sec)
@@ -319,9 +317,6 @@ static int __init vpd_platform_init(void)
if (!vpd_kobj)
return -ENOMEM;
- memset(&ro_vpd, 0, sizeof(ro_vpd));
- memset(&rw_vpd, 0, sizeof(rw_vpd));
-
platform_driver_register(&vpd_driver);
return 0;
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index 04c1a0efa7a7..6821ed0cd5e8 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -6,7 +6,33 @@ menu "FSI support"
config FSI
tristate "FSI support"
+ select CRC4
---help---
FSI - the FRU Support Interface - is a simple bus for low-level
access to POWER-based hardware.
+
+if FSI
+
+config FSI_MASTER_GPIO
+ tristate "GPIO-based FSI master"
+ depends on GPIOLIB
+ select CRC4
+ ---help---
+	  This option enables an FSI master driver using GPIO lines.
+
+config FSI_MASTER_HUB
+ tristate "FSI hub master"
+ ---help---
+	  This option enables an FSI hub master driver. A hub is a type of
+	  FSI master that is connected to the upstream master via a slave.
+	  Hubs allow chaining of FSI links to an arbitrary depth, allowing
+	  for a high target device fanout.
+
+config FSI_SCOM
+ tristate "SCOM FSI client device driver"
+ ---help---
+ This option enables an FSI based SCOM device driver.
+
+endif
+
endmenu
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index db0e5e7c1655..65eb99dfafdb 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -1,2 +1,5 @@
obj-$(CONFIG_FSI) += fsi-core.o
+obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
+obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
+obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 3d55bd547178..a485864cb512 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -13,9 +13,830 @@
* GNU General Public License for more details.
*/
+#include <linux/crc4.h>
#include <linux/device.h>
#include <linux/fsi.h>
+#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include "fsi-master.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi.h>
+
+#define FSI_SLAVE_CONF_NEXT_MASK GENMASK(31, 31)
+#define FSI_SLAVE_CONF_SLOTS_MASK GENMASK(23, 16)
+#define FSI_SLAVE_CONF_SLOTS_SHIFT 16
+#define FSI_SLAVE_CONF_VERSION_MASK GENMASK(15, 12)
+#define FSI_SLAVE_CONF_VERSION_SHIFT 12
+#define FSI_SLAVE_CONF_TYPE_MASK GENMASK(11, 4)
+#define FSI_SLAVE_CONF_TYPE_SHIFT 4
+#define FSI_SLAVE_CONF_CRC_SHIFT 4
+#define FSI_SLAVE_CONF_CRC_MASK GENMASK(3, 0)
+#define FSI_SLAVE_CONF_DATA_BITS 28
+
+#define FSI_PEEK_BASE 0x410
+
+static const int engine_page_size = 0x400;
+
+#define FSI_SLAVE_BASE 0x800
+
+/*
+ * FSI slave engine control register offsets
+ */
+#define FSI_SMODE 0x0 /* R/W: Mode register */
+#define FSI_SISC 0x8 /* R/W: Interrupt condition */
+#define FSI_SSTAT 0x14 /* R : Slave status */
+#define FSI_LLMODE 0x100 /* R/W: Link layer mode register */
+
+/*
+ * SMODE fields
+ */
+#define FSI_SMODE_WSC 0x80000000 /* Warm start done */
+#define FSI_SMODE_ECRC 0x20000000 /* Hw CRC check */
+#define FSI_SMODE_SID_SHIFT 24 /* ID shift */
+#define FSI_SMODE_SID_MASK 3 /* ID Mask */
+#define FSI_SMODE_ED_SHIFT 20 /* Echo delay shift */
+#define FSI_SMODE_ED_MASK 0xf /* Echo delay mask */
+#define FSI_SMODE_SD_SHIFT 16 /* Send delay shift */
+#define FSI_SMODE_SD_MASK 0xf /* Send delay mask */
+#define FSI_SMODE_LBCRR_SHIFT 8 /* Clk ratio shift */
+#define FSI_SMODE_LBCRR_MASK 0xf /* Clk ratio mask */
+
+/*
+ * LLMODE fields
+ */
+#define FSI_LLMODE_ASYNC 0x1
+
+#define FSI_SLAVE_SIZE_23b 0x800000
+
+static DEFINE_IDA(master_ida);
+
+struct fsi_slave {
+ struct device dev;
+ struct fsi_master *master;
+ int id;
+ int link;
+ uint32_t size; /* size of slave address space */
+};
+
+#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
+#define to_fsi_slave(d) container_of(d, struct fsi_slave, dev)
+
+static const int slave_retries = 2;
+static int discard_errors;
+
+static int fsi_master_read(struct fsi_master *master, int link,
+ uint8_t slave_id, uint32_t addr, void *val, size_t size);
+static int fsi_master_write(struct fsi_master *master, int link,
+ uint8_t slave_id, uint32_t addr, const void *val, size_t size);
+static int fsi_master_break(struct fsi_master *master, int link);
+
+/*
+ * fsi_device_read() / fsi_device_write() / fsi_device_peek()
+ *
+ * FSI endpoint-device support
+ *
+ * Read / write / peek accessors for a client
+ *
+ * Parameters:
+ * dev: Structure passed to FSI client device drivers on probe().
+ * addr: FSI address of given device. Client should pass in its base address
+ * plus desired offset to access its register space.
+ * val: For read/peek this is the value read at the specified address. For
+ * write this is value to write to the specified address.
+ * The data in val must be FSI bus endian (big endian).
+ * size: Size in bytes of the operation. Sizes supported are 1, 2 and 4 bytes.
+ * Addresses must be aligned on size boundaries or an error will result.
+ */
+int fsi_device_read(struct fsi_device *dev, uint32_t addr, void *val,
+ size_t size)
+{
+ if (addr > dev->size || size > dev->size || addr > dev->size - size)
+ return -EINVAL;
+
+ return fsi_slave_read(dev->slave, dev->addr + addr, val, size);
+}
+EXPORT_SYMBOL_GPL(fsi_device_read);
+
+int fsi_device_write(struct fsi_device *dev, uint32_t addr, const void *val,
+ size_t size)
+{
+ if (addr > dev->size || size > dev->size || addr > dev->size - size)
+ return -EINVAL;
+
+ return fsi_slave_write(dev->slave, dev->addr + addr, val, size);
+}
+EXPORT_SYMBOL_GPL(fsi_device_write);
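+
+/*
+ * Sketch of a client-side read using the accessors above (illustrative only;
+ * the 0x4 register offset is a made-up example):
+ *
+ *	__be32 reg;
+ *	int rc = fsi_device_read(dev, 0x4, &reg, sizeof(reg));
+ *	if (!rc)
+ *		dev_dbg(&dev->dev, "reg: %08x\n", be32_to_cpu(reg));
+ */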
+
+int fsi_device_peek(struct fsi_device *dev, void *val)
+{
+ uint32_t addr = FSI_PEEK_BASE + ((dev->unit - 2) * sizeof(uint32_t));
+
+ return fsi_slave_read(dev->slave, addr, val, sizeof(uint32_t));
+}
+
+static void fsi_device_release(struct device *_device)
+{
+ struct fsi_device *device = to_fsi_dev(_device);
+
+ kfree(device);
+}
+
+static struct fsi_device *fsi_create_device(struct fsi_slave *slave)
+{
+ struct fsi_device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ dev->dev.parent = &slave->dev;
+ dev->dev.bus = &fsi_bus_type;
+ dev->dev.release = fsi_device_release;
+
+ return dev;
+}
+
+/* FSI slave support */
+static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
+ uint8_t *idp)
+{
+ uint32_t addr = *addrp;
+ uint8_t id = *idp;
+
+ if (addr > slave->size)
+ return -EINVAL;
+
+ /* For 23 bit addressing, we encode the extra two bits in the slave
+ * id (and the slave's actual ID needs to be 0).
+ */
+ if (addr > 0x1fffff) {
+ if (slave->id != 0)
+ return -EINVAL;
+ id = (addr >> 21) & 0x3;
+ addr &= 0x1fffff;
+ }
+
+ *addrp = addr;
+ *idp = id;
+ return 0;
+}
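+
+/*
+ * Worked example of the encoding above: address 0x300000 on a slave with
+ * hardware ID 0 is sent as id = 0x1 ((0x300000 >> 21) & 0x3) with the
+ * remaining 21-bit address 0x100000.
+ */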
+
+int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
+{
+ struct fsi_master *master = slave->master;
+ uint32_t irq, stat;
+ int rc, link;
+ uint8_t id;
+
+ link = slave->link;
+ id = slave->id;
+
+ rc = fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
+ &irq, sizeof(irq));
+ if (rc)
+ return rc;
+
+ rc = fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SSTAT,
+ &stat, sizeof(stat));
+ if (rc)
+ return rc;
+
+ dev_info(&slave->dev, "status: 0x%08x, sisc: 0x%08x\n",
+ be32_to_cpu(stat), be32_to_cpu(irq));
+
+ /* clear interrupts */
+ return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
+ &irq, sizeof(irq));
+}
+
+static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
+
+int fsi_slave_handle_error(struct fsi_slave *slave, bool write, uint32_t addr,
+ size_t size)
+{
+ struct fsi_master *master = slave->master;
+ int rc, link;
+ uint32_t reg;
+ uint8_t id;
+
+ if (discard_errors)
+ return -1;
+
+ link = slave->link;
+ id = slave->id;
+
+ dev_dbg(&slave->dev, "handling error on %s to 0x%08x[%zd]",
+ write ? "write" : "read", addr, size);
+
+ /* try a simple clear of error conditions, which may fail if we've lost
+ * communication with the slave
+ */
+ rc = fsi_slave_report_and_clear_errors(slave);
+ if (!rc)
+ return 0;
+
+ /* send a TERM and retry */
+ if (master->term) {
+ rc = master->term(master, link, id);
+ if (!rc) {
+ rc = fsi_master_read(master, link, id, 0,
+ &reg, sizeof(reg));
+ if (!rc)
+ rc = fsi_slave_report_and_clear_errors(slave);
+ if (!rc)
+ return 0;
+ }
+ }
+
+ /* getting serious, reset the slave via BREAK */
+ rc = fsi_master_break(master, link);
+ if (rc)
+ return rc;
+
+ rc = fsi_slave_set_smode(master, link, id);
+ if (rc)
+ return rc;
+
+ return fsi_slave_report_and_clear_errors(slave);
+}
+
+int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
+ void *val, size_t size)
+{
+ uint8_t id = slave->id;
+ int rc, err_rc, i;
+
+ rc = fsi_slave_calc_addr(slave, &addr, &id);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < slave_retries; i++) {
+ rc = fsi_master_read(slave->master, slave->link,
+ id, addr, val, size);
+ if (!rc)
+ break;
+
+ err_rc = fsi_slave_handle_error(slave, false, addr, size);
+ if (err_rc)
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_read);
+
+int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
+ const void *val, size_t size)
+{
+ uint8_t id = slave->id;
+ int rc, err_rc, i;
+
+ rc = fsi_slave_calc_addr(slave, &addr, &id);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < slave_retries; i++) {
+ rc = fsi_master_write(slave->master, slave->link,
+ id, addr, val, size);
+ if (!rc)
+ break;
+
+ err_rc = fsi_slave_handle_error(slave, true, addr, size);
+ if (err_rc)
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_write);
+
+int fsi_slave_claim_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size)
+{
+ if (addr + size < addr)
+ return -EINVAL;
+
+ if (addr + size > slave->size)
+ return -EINVAL;
+
+ /* todo: check for overlapping claims */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsi_slave_claim_range);
+
+void fsi_slave_release_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size)
+{
+}
+EXPORT_SYMBOL_GPL(fsi_slave_release_range);
+
+static int fsi_slave_scan(struct fsi_slave *slave)
+{
+ uint32_t engine_addr;
+ uint32_t conf;
+ int rc, i;
+
+ /*
+ * scan engines
+ *
+	 * We keep the peek mode and slave engines for the core, so we start
+ * at the third slot in the configuration table. We also need to
+ * skip the chip ID entry at the start of the address space.
+ */
+ engine_addr = engine_page_size * 3;
+ for (i = 2; i < engine_page_size / sizeof(uint32_t); i++) {
+ uint8_t slots, version, type, crc;
+ struct fsi_device *dev;
+
+ rc = fsi_slave_read(slave, (i + 1) * sizeof(conf),
+ &conf, sizeof(conf));
+ if (rc) {
+ dev_warn(&slave->dev,
+ "error reading slave registers\n");
+ return -1;
+ }
+ conf = be32_to_cpu(conf);
+
+ crc = crc4(0, conf, 32);
+ if (crc) {
+ dev_warn(&slave->dev,
+ "crc error in slave register at 0x%04x\n",
+ i);
+ return -1;
+ }
+
+ slots = (conf & FSI_SLAVE_CONF_SLOTS_MASK)
+ >> FSI_SLAVE_CONF_SLOTS_SHIFT;
+ version = (conf & FSI_SLAVE_CONF_VERSION_MASK)
+ >> FSI_SLAVE_CONF_VERSION_SHIFT;
+ type = (conf & FSI_SLAVE_CONF_TYPE_MASK)
+ >> FSI_SLAVE_CONF_TYPE_SHIFT;
+
+ /*
+ * Unused address areas are marked by a zero type value; this
+ * skips the defined address areas
+ */
+ if (type != 0 && slots != 0) {
+
+ /* create device */
+ dev = fsi_create_device(slave);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->slave = slave;
+ dev->engine_type = type;
+ dev->version = version;
+ dev->unit = i;
+ dev->addr = engine_addr;
+ dev->size = slots * engine_page_size;
+
+ dev_dbg(&slave->dev,
+ "engine[%i]: type %x, version %x, addr %x size %x\n",
+ dev->unit, dev->engine_type, version,
+ dev->addr, dev->size);
+
+ dev_set_name(&dev->dev, "%02x:%02x:%02x:%02x",
+ slave->master->idx, slave->link,
+ slave->id, i - 2);
+
+ rc = device_register(&dev->dev);
+ if (rc) {
+ dev_warn(&slave->dev, "add failed: %d\n", rc);
+ put_device(&dev->dev);
+ }
+ }
+
+ engine_addr += slots * engine_page_size;
+
+ if (!(conf & FSI_SLAVE_CONF_NEXT_MASK))
+ break;
+ }
+
+ return 0;
+}
+
+static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+ size_t total_len, read_len;
+ int rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += read_len) {
+		read_len = min_t(size_t, count - total_len, 4);
+ read_len -= off & 0x3;
+
+ rc = fsi_slave_read(slave, off, buf + total_len, read_len);
+ if (rc)
+ return rc;
+
+ off += read_len;
+ }
+
+ return count;
+}
+
+static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
+ struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+ size_t total_len, write_len;
+ int rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += write_len) {
+		write_len = min_t(size_t, count - total_len, 4);
+ write_len -= off & 0x3;
+
+ rc = fsi_slave_write(slave, off, buf + total_len, write_len);
+ if (rc)
+ return rc;
+
+ off += write_len;
+ }
+
+ return count;
+}
+
+static struct bin_attribute fsi_slave_raw_attr = {
+ .attr = {
+ .name = "raw",
+ .mode = 0600,
+ },
+ .size = 0,
+ .read = fsi_slave_sysfs_raw_read,
+ .write = fsi_slave_sysfs_raw_write,
+};
+
+static ssize_t fsi_slave_sysfs_term_write(struct file *file,
+ struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
+ struct fsi_master *master = slave->master;
+
+ if (!master->term)
+ return -ENODEV;
+
+ master->term(master, slave->link, slave->id);
+ return count;
+}
+
+static struct bin_attribute fsi_slave_term_attr = {
+ .attr = {
+ .name = "term",
+ .mode = 0200,
+ },
+ .size = 0,
+ .write = fsi_slave_sysfs_term_write,
+};
+
+/* Encode slave local bus echo delay */
+static inline uint32_t fsi_smode_echodly(int x)
+{
+ return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
+}
+
+/* Encode slave local bus send delay */
+static inline uint32_t fsi_smode_senddly(int x)
+{
+ return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
+}
+
+/* Encode slave local bus clock rate ratio */
+static inline uint32_t fsi_smode_lbcrr(int x)
+{
+ return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
+}
+
+/* Encode slave ID */
+static inline uint32_t fsi_smode_sid(int x)
+{
+ return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
+}
+
+static uint32_t fsi_slave_smode(int id)
+{
+ return FSI_SMODE_WSC | FSI_SMODE_ECRC
+ | fsi_smode_sid(id)
+ | fsi_smode_echodly(0xf) | fsi_smode_senddly(0xf)
+ | fsi_smode_lbcrr(0x8);
+}
+
+static int fsi_slave_set_smode(struct fsi_master *master, int link, int id)
+{
+ uint32_t smode;
+
+ /* set our smode register with the slave ID field to 0; this enables
+ * extended slave addressing
+ */
+ smode = fsi_slave_smode(id);
+ smode = cpu_to_be32(smode);
+
+ return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SMODE,
+ &smode, sizeof(smode));
+}
+
+static void fsi_slave_release(struct device *dev)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ kfree(slave);
+}
+
+static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
+{
+ uint32_t chip_id, llmode;
+ struct fsi_slave *slave;
+ uint8_t crc;
+ int rc;
+
+ /* Currently, we only support single slaves on a link, and use the
+ * full 23-bit address range
+ */
+ if (id != 0)
+ return -EINVAL;
+
+ rc = fsi_master_read(master, link, id, 0, &chip_id, sizeof(chip_id));
+ if (rc) {
+ dev_dbg(&master->dev, "can't read slave %02x:%02x %d\n",
+ link, id, rc);
+ return -ENODEV;
+ }
+ chip_id = be32_to_cpu(chip_id);
+
+ crc = crc4(0, chip_id, 32);
+ if (crc) {
+ dev_warn(&master->dev, "slave %02x:%02x invalid chip id CRC!\n",
+ link, id);
+ return -EIO;
+ }
+
+ dev_info(&master->dev, "fsi: found chip %08x at %02x:%02x:%02x\n",
+ chip_id, master->idx, link, id);
+
+ rc = fsi_slave_set_smode(master, link, id);
+ if (rc) {
+ dev_warn(&master->dev,
+ "can't set smode on slave:%02x:%02x %d\n",
+ link, id, rc);
+ return -ENODEV;
+ }
+
+ /* If we're behind a master that doesn't provide a self-running bus
+ * clock, put the slave into async mode
+ */
+ if (master->flags & FSI_MASTER_FLAG_SWCLOCK) {
+ llmode = cpu_to_be32(FSI_LLMODE_ASYNC);
+ rc = fsi_master_write(master, link, id,
+ FSI_SLAVE_BASE + FSI_LLMODE,
+ &llmode, sizeof(llmode));
+ if (rc)
+ dev_warn(&master->dev,
+ "can't set llmode on slave:%02x:%02x %d\n",
+ link, id, rc);
+ }
+
+ /* We can communicate with a slave; create the slave device and
+ * register.
+ */
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ if (!slave)
+ return -ENOMEM;
+
+ slave->master = master;
+ slave->dev.parent = &master->dev;
+ slave->dev.release = fsi_slave_release;
+ slave->link = link;
+ slave->id = id;
+ slave->size = FSI_SLAVE_SIZE_23b;
+
+ dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
+ rc = device_register(&slave->dev);
+ if (rc < 0) {
+ dev_warn(&master->dev, "failed to create slave device: %d\n",
+ rc);
+ put_device(&slave->dev);
+ return rc;
+ }
+
+ rc = device_create_bin_file(&slave->dev, &fsi_slave_raw_attr);
+ if (rc)
+ dev_warn(&slave->dev, "failed to create raw attr: %d\n", rc);
+
+ rc = device_create_bin_file(&slave->dev, &fsi_slave_term_attr);
+ if (rc)
+ dev_warn(&slave->dev, "failed to create term attr: %d\n", rc);
+
+ rc = fsi_slave_scan(slave);
+ if (rc)
+ dev_dbg(&master->dev, "failed during slave scan with: %d\n",
+ rc);
+
+ return rc;
+}
+
+/* FSI master support */
+static int fsi_check_access(uint32_t addr, size_t size)
+{
+ if (size != 1 && size != 2 && size != 4)
+ return -EINVAL;
+
+	/* accesses must be naturally aligned to their size */
+	if (addr & (size - 1))
+		return -EINVAL;
+
+ return 0;
+}
+
+static int fsi_master_read(struct fsi_master *master, int link,
+ uint8_t slave_id, uint32_t addr, void *val, size_t size)
+{
+ int rc;
+
+ trace_fsi_master_read(master, link, slave_id, addr, size);
+
+ rc = fsi_check_access(addr, size);
+ if (!rc)
+ rc = master->read(master, link, slave_id, addr, val, size);
+
+ trace_fsi_master_rw_result(master, link, slave_id, addr, size,
+ false, val, rc);
+
+ return rc;
+}
+
+static int fsi_master_write(struct fsi_master *master, int link,
+ uint8_t slave_id, uint32_t addr, const void *val, size_t size)
+{
+ int rc;
+
+ trace_fsi_master_write(master, link, slave_id, addr, size, val);
+
+ rc = fsi_check_access(addr, size);
+ if (!rc)
+ rc = master->write(master, link, slave_id, addr, val, size);
+
+ trace_fsi_master_rw_result(master, link, slave_id, addr, size,
+ true, val, rc);
+
+ return rc;
+}
+
+static int fsi_master_link_enable(struct fsi_master *master, int link)
+{
+ if (master->link_enable)
+ return master->link_enable(master, link);
+
+ return 0;
+}
+
+/*
+ * Issue a break command on this link
+ */
+static int fsi_master_break(struct fsi_master *master, int link)
+{
+ trace_fsi_master_break(master, link);
+
+ if (master->send_break)
+ return master->send_break(master, link);
+
+ return 0;
+}
+
+static int fsi_master_scan(struct fsi_master *master)
+{
+ int link, rc;
+
+ for (link = 0; link < master->n_links; link++) {
+ rc = fsi_master_link_enable(master, link);
+ if (rc) {
+ dev_dbg(&master->dev,
+ "enable link %d failed: %d\n", link, rc);
+ continue;
+ }
+ rc = fsi_master_break(master, link);
+ if (rc) {
+ dev_dbg(&master->dev,
+ "break to link %d failed: %d\n", link, rc);
+ continue;
+ }
+
+ fsi_slave_init(master, link, 0);
+ }
+
+ return 0;
+}
+
+static int fsi_slave_remove_device(struct device *dev, void *arg)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+static int fsi_master_remove_slave(struct device *dev, void *arg)
+{
+ device_for_each_child(dev, NULL, fsi_slave_remove_device);
+ device_unregister(dev);
+ return 0;
+}
+
+static void fsi_master_unscan(struct fsi_master *master)
+{
+ device_for_each_child(&master->dev, NULL, fsi_master_remove_slave);
+}
+
+static ssize_t master_rescan_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_master *master = to_fsi_master(dev);
+ int rc;
+
+ fsi_master_unscan(master);
+ rc = fsi_master_scan(master);
+ if (rc < 0)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR(rescan, 0200, NULL, master_rescan_store);
+
+static ssize_t master_break_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_master *master = to_fsi_master(dev);
+
+ fsi_master_break(master, 0);
+
+ return count;
+}
+
+static DEVICE_ATTR(break, 0200, NULL, master_break_store);
+
+int fsi_master_register(struct fsi_master *master)
+{
+ int rc;
+
+ if (!master)
+ return -EINVAL;
+
+ master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+ dev_set_name(&master->dev, "fsi%d", master->idx);
+
+ rc = device_register(&master->dev);
+ if (rc) {
+ ida_simple_remove(&master_ida, master->idx);
+ return rc;
+ }
+
+ rc = device_create_file(&master->dev, &dev_attr_rescan);
+ if (rc) {
+ device_unregister(&master->dev);
+ ida_simple_remove(&master_ida, master->idx);
+ return rc;
+ }
+
+ rc = device_create_file(&master->dev, &dev_attr_break);
+ if (rc) {
+ device_unregister(&master->dev);
+ ida_simple_remove(&master_ida, master->idx);
+ return rc;
+ }
+
+ fsi_master_scan(master);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsi_master_register);
+
+void fsi_master_unregister(struct fsi_master *master)
+{
+ if (master->idx >= 0) {
+ ida_simple_remove(&master_ida, master->idx);
+ master->idx = -1;
+ }
+
+ fsi_master_unscan(master);
+ device_unregister(&master->dev);
+}
+EXPORT_SYMBOL_GPL(fsi_master_unregister);
/* FSI core & Linux bus type definitions */
@@ -39,6 +860,23 @@ static int fsi_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
+int fsi_driver_register(struct fsi_driver *fsi_drv)
+{
+ if (!fsi_drv)
+ return -EINVAL;
+ if (!fsi_drv->id_table)
+ return -EINVAL;
+
+ return driver_register(&fsi_drv->drv);
+}
+EXPORT_SYMBOL_GPL(fsi_driver_register);
+
+void fsi_driver_unregister(struct fsi_driver *fsi_drv)
+{
+ driver_unregister(&fsi_drv->drv);
+}
+EXPORT_SYMBOL_GPL(fsi_driver_unregister);
+
struct bus_type fsi_bus_type = {
.name = "fsi",
.match = fsi_bus_match,
@@ -57,3 +895,6 @@ static void fsi_exit(void)
module_init(fsi_init);
module_exit(fsi_exit);
+module_param(discard_errors, int, 0664);
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(discard_errors, "Don't invoke error handling on bus accesses");
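+
+/*
+ * Note: the 0664 permissions above also expose discard_errors at runtime
+ * through sysfs, under /sys/module/<module name>/parameters/discard_errors
+ * for a modular build.
+ */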
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
new file mode 100644
index 000000000000..ae2618768508
--- /dev/null
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -0,0 +1,604 @@
+/*
+ * An FSI master controller, using a simple GPIO bit-banging interface
+ */
+
+#include <linux/crc4.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "fsi-master.h"
+
+#define	FSI_GPIO_STD_DLY	1	/* Standard pin delay in ns */
+#define	FSI_ECHO_DELAY_CLOCKS	16	/* Number of clocks for echo delay */
+#define	FSI_PRE_BREAK_CLOCKS	50	/* Number of clocks to prep for break */
+#define	FSI_BREAK_CLOCKS	256	/* Number of clocks to issue break */
+#define	FSI_POST_BREAK_CLOCKS	16000	/* Number of clocks to set up cfam */
+#define	FSI_INIT_CLOCKS		5000	/* Clock out any old data */
+#define	FSI_GPIO_STD_DELAY	10	/* Standard GPIO delay in ns */
+ /* todo: adjust down as low as */
+ /* possible or eliminate */
+#define FSI_GPIO_CMD_DPOLL 0x2
+#define FSI_GPIO_CMD_TERM 0x3f
+#define FSI_GPIO_CMD_ABS_AR 0x4
+
+#define FSI_GPIO_DPOLL_CLOCKS 100 /* < 21 will cause slave to hang */
+
+/* Bus errors */
+#define FSI_GPIO_ERR_BUSY 1 /* Slave stuck in busy state */
+#define FSI_GPIO_RESP_ERRA 2 /* Any (misc) Error */
+#define FSI_GPIO_RESP_ERRC 3 /* Slave reports master CRC error */
+#define FSI_GPIO_MTOE 4 /* Master time out error */
+#define FSI_GPIO_CRC_INVAL 5 /* Master reports slave CRC error */
+
+/* Normal slave responses */
+#define FSI_GPIO_RESP_BUSY 1
+#define FSI_GPIO_RESP_ACK 0
+#define FSI_GPIO_RESP_ACKD 4
+
+#define FSI_GPIO_MAX_BUSY 100
+#define FSI_GPIO_MTOE_COUNT 1000
+#define FSI_GPIO_DRAIN_BITS 20
+#define FSI_GPIO_CRC_SIZE 4
+#define FSI_GPIO_MSG_ID_SIZE 2
+#define FSI_GPIO_MSG_RESPID_SIZE 2
+#define FSI_GPIO_PRIME_SLAVE_CLOCKS 100
+
+struct fsi_master_gpio {
+ struct fsi_master master;
+ struct device *dev;
+ spinlock_t cmd_lock; /* Lock for commands */
+ struct gpio_desc *gpio_clk;
+ struct gpio_desc *gpio_data;
+ struct gpio_desc *gpio_trans; /* Voltage translator */
+ struct gpio_desc *gpio_enable; /* FSI enable */
+ struct gpio_desc *gpio_mux; /* Mux control */
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_gpio.h>
+
+#define to_fsi_master_gpio(m) container_of(m, struct fsi_master_gpio, master)
+
+struct fsi_gpio_msg {
+ uint64_t msg;
+ uint8_t bits;
+};
+
+static void clock_toggle(struct fsi_master_gpio *master, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 0);
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 1);
+ }
+}
+
+static int sda_in(struct fsi_master_gpio *master)
+{
+ int in;
+
+ ndelay(FSI_GPIO_STD_DLY);
+ in = gpiod_get_value(master->gpio_data);
+ return in ? 1 : 0;
+}
+
+static void sda_out(struct fsi_master_gpio *master, int value)
+{
+ gpiod_set_value(master->gpio_data, value);
+}
+
+static void set_sda_input(struct fsi_master_gpio *master)
+{
+ gpiod_direction_input(master->gpio_data);
+ gpiod_set_value(master->gpio_trans, 0);
+}
+
+static void set_sda_output(struct fsi_master_gpio *master, int value)
+{
+ gpiod_set_value(master->gpio_trans, 1);
+ gpiod_direction_output(master->gpio_data, value);
+}
+
+static void clock_zeros(struct fsi_master_gpio *master, int count)
+{
+ set_sda_output(master, 1);
+ clock_toggle(master, count);
+}
+
+static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
+ uint8_t num_bits)
+{
+ uint8_t bit, in_bit;
+
+ set_sda_input(master);
+
+ for (bit = 0; bit < num_bits; bit++) {
+ clock_toggle(master, 1);
+ in_bit = sda_in(master);
+ msg->msg <<= 1;
+ msg->msg |= ~in_bit & 0x1; /* Data is active low */
+ }
+ msg->bits += num_bits;
+
+ trace_fsi_master_gpio_in(master, num_bits, msg->msg);
+}
+
+static void serial_out(struct fsi_master_gpio *master,
+ const struct fsi_gpio_msg *cmd)
+{
+ uint8_t bit;
+ uint64_t msg = ~cmd->msg; /* Data is active low */
+ uint64_t sda_mask = 0x1ULL << (cmd->bits - 1);
+ uint64_t last_bit = ~0;
+ int next_bit;
+
+ trace_fsi_master_gpio_out(master, cmd->bits, cmd->msg);
+
+ if (!cmd->bits) {
+ dev_warn(master->dev, "trying to output 0 bits\n");
+ return;
+ }
+ set_sda_output(master, 0);
+
+ /* Send the start bit */
+ sda_out(master, 0);
+ clock_toggle(master, 1);
+
+ /* Send the message */
+ for (bit = 0; bit < cmd->bits; bit++) {
+ next_bit = (msg & sda_mask) >> (cmd->bits - 1);
+ if (last_bit ^ next_bit) {
+ sda_out(master, next_bit);
+ last_bit = next_bit;
+ }
+ clock_toggle(master, 1);
+ msg <<= 1;
+ }
+}
+
+static void msg_push_bits(struct fsi_gpio_msg *msg, uint64_t data, int bits)
+{
+ msg->msg <<= bits;
+ msg->msg |= data & ((1ull << bits) - 1);
+ msg->bits += bits;
+}
+
+static void msg_push_crc(struct fsi_gpio_msg *msg)
+{
+ uint8_t crc;
+ int top;
+
+ top = msg->bits & 0x3;
+
+ /* start bit, and any non-aligned top bits */
+ crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
+
+ /* aligned bits */
+ crc = crc4(crc, msg->msg, msg->bits - top);
+
+ msg_push_bits(msg, crc, 4);
+}
+
+/*
+ * Encode an Absolute Address command
+ */
+static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
+ uint8_t id, uint32_t addr, size_t size, const void *data)
+{
+ bool write = !!data;
+ uint8_t ds;
+ int i;
+
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, id, 2);
+ msg_push_bits(cmd, FSI_GPIO_CMD_ABS_AR, 3);
+ msg_push_bits(cmd, write ? 0 : 1, 1);
+
+ /*
+ * The read/write size is encoded in the lower bits of the address
+ * (as it must be naturally-aligned), and the following ds bit.
+ *
+ * size addr:1 addr:0 ds
+ * 1 x x 0
+ * 2 x 0 1
+ * 4 0 1 1
+ *
+ */
+ ds = size > 1 ? 1 : 0;
+ addr &= ~(size - 1);
+ if (size == 4)
+ addr |= 1;
+
+ msg_push_bits(cmd, addr & ((1 << 21) - 1), 21);
+ msg_push_bits(cmd, ds, 1);
+ for (i = 0; write && i < size; i++)
+ msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
+
+ msg_push_crc(cmd);
+}
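+
+/*
+ * Worked example of the size encoding above: a 4-byte access to address
+ * 0x800 clears addr:1..0 and then sets addr:0, giving addr = 0x801 with
+ * ds = 1; a 1-byte access leaves the address untouched and sends ds = 0.
+ */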
+
+static void build_dpoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_GPIO_CMD_DPOLL, 3);
+ msg_push_crc(cmd);
+}
+
+static void echo_delay(struct fsi_master_gpio *master)
+{
+ set_sda_output(master, 1);
+ clock_toggle(master, FSI_ECHO_DELAY_CLOCKS);
+}
+
+static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_GPIO_CMD_TERM, 6);
+ msg_push_crc(cmd);
+}
+
+/*
+ * Store information on master errors so handler can detect and clean
+ * up the bus
+ */
+static void fsi_master_gpio_error(struct fsi_master_gpio *master, int error)
+{
+
+}
+
+static int read_one_response(struct fsi_master_gpio *master,
+ uint8_t data_size, struct fsi_gpio_msg *msgp, uint8_t *tagp)
+{
+ struct fsi_gpio_msg msg;
+ uint8_t id, tag;
+ uint32_t crc;
+ int i;
+
+ /* wait for the start bit */
+ for (i = 0; i < FSI_GPIO_MTOE_COUNT; i++) {
+ msg.bits = 0;
+ msg.msg = 0;
+ serial_in(master, &msg, 1);
+ if (msg.msg)
+ break;
+ }
+ if (i == FSI_GPIO_MTOE_COUNT) {
+ dev_dbg(master->dev,
+ "Master time out waiting for response\n");
+ fsi_master_gpio_error(master, FSI_GPIO_MTOE);
+ return -EIO;
+ }
+
+ msg.bits = 0;
+ msg.msg = 0;
+
+ /* Read slave ID & response tag */
+ serial_in(master, &msg, 4);
+
+ id = (msg.msg >> FSI_GPIO_MSG_RESPID_SIZE) & 0x3;
+ tag = msg.msg & 0x3;
+
+ /* If we have an ACK and we're expecting data, clock the data in too */
+ if (tag == FSI_GPIO_RESP_ACK && data_size)
+ serial_in(master, &msg, data_size * 8);
+
+ /* read CRC */
+ serial_in(master, &msg, FSI_GPIO_CRC_SIZE);
+
+ /* we have a whole message now; check CRC */
+ crc = crc4(0, 1, 1);
+ crc = crc4(crc, msg.msg, msg.bits);
+ if (crc) {
+ dev_dbg(master->dev, "ERR response CRC\n");
+ fsi_master_gpio_error(master, FSI_GPIO_CRC_INVAL);
+ return -EIO;
+ }
+
+ if (msgp)
+ *msgp = msg;
+ if (tagp)
+ *tagp = tag;
+
+ return 0;
+}
+
+static int issue_term(struct fsi_master_gpio *master, uint8_t slave)
+{
+ struct fsi_gpio_msg cmd;
+ uint8_t tag;
+ int rc;
+
+ build_term_command(&cmd, slave);
+ serial_out(master, &cmd);
+ echo_delay(master);
+
+ rc = read_one_response(master, 0, NULL, &tag);
+ if (rc < 0) {
+ dev_err(master->dev,
+ "TERM failed; lost communication with slave\n");
+ return -EIO;
+ } else if (tag != FSI_GPIO_RESP_ACK) {
+ dev_err(master->dev, "TERM failed; response %d\n", tag);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int poll_for_response(struct fsi_master_gpio *master,
+ uint8_t slave, uint8_t size, void *data)
+{
+ struct fsi_gpio_msg response, cmd;
+ int busy_count = 0, rc, i;
+ uint8_t tag;
+ uint8_t *data_byte = data;
+
+retry:
+ rc = read_one_response(master, size, &response, &tag);
+ if (rc)
+ return rc;
+
+ switch (tag) {
+ case FSI_GPIO_RESP_ACK:
+ if (size && data) {
+ uint64_t val = response.msg;
+ /* clear crc & mask */
+ val >>= 4;
+ val &= (1ull << (size * 8)) - 1;
+
+ for (i = 0; i < size; i++) {
+ data_byte[size-i-1] = val;
+ val >>= 8;
+ }
+ }
+ break;
+ case FSI_GPIO_RESP_BUSY:
+ /*
+		 * It's necessary to clock the slave before issuing a
+		 * d-poll, though this is not indicated in the hardware
+		 * protocol spec. Fewer than 21 clocks cause the slave
+		 * to hang; 21 are OK.
+ */
+ clock_zeros(master, FSI_GPIO_DPOLL_CLOCKS);
+ if (busy_count++ < FSI_GPIO_MAX_BUSY) {
+ build_dpoll_command(&cmd, slave);
+ serial_out(master, &cmd);
+ echo_delay(master);
+ goto retry;
+ }
+ dev_warn(master->dev,
+ "ERR slave is stuck in busy state, issuing TERM\n");
+ issue_term(master, slave);
+ rc = -EIO;
+ break;
+
+ case FSI_GPIO_RESP_ERRA:
+ case FSI_GPIO_RESP_ERRC:
+ dev_dbg(master->dev, "ERR%c received: 0x%x\n",
+ tag == FSI_GPIO_RESP_ERRA ? 'A' : 'C',
+ (int)response.msg);
+ fsi_master_gpio_error(master, response.msg);
+ rc = -EIO;
+ break;
+ }
+
+ /* Clock the slave enough to be ready for next operation */
+ clock_zeros(master, FSI_GPIO_PRIME_SLAVE_CLOCKS);
+ return rc;
+}
+
+static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
+ struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&master->cmd_lock, flags);
+ serial_out(master, cmd);
+ echo_delay(master);
+ rc = poll_for_response(master, slave, resp_len, resp);
+ spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+ return rc;
+}
+
+static int fsi_master_gpio_read(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, void *val, size_t size)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+ struct fsi_gpio_msg cmd;
+
+ if (link != 0)
+ return -ENODEV;
+
+ build_abs_ar_command(&cmd, id, addr, size, NULL);
+ return fsi_master_gpio_xfer(master, id, &cmd, size, val);
+}
+
+static int fsi_master_gpio_write(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+ struct fsi_gpio_msg cmd;
+
+ if (link != 0)
+ return -ENODEV;
+
+ build_abs_ar_command(&cmd, id, addr, size, val);
+ return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+}
+
+static int fsi_master_gpio_term(struct fsi_master *_master,
+ int link, uint8_t id)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+ struct fsi_gpio_msg cmd;
+
+ if (link != 0)
+ return -ENODEV;
+
+ build_term_command(&cmd, id);
+ return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+}
+
+static int fsi_master_gpio_break(struct fsi_master *_master, int link)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+ if (link != 0)
+ return -ENODEV;
+
+ trace_fsi_master_gpio_break(master);
+
+ set_sda_output(master, 1);
+ sda_out(master, 1);
+ clock_toggle(master, FSI_PRE_BREAK_CLOCKS);
+ sda_out(master, 0);
+ clock_toggle(master, FSI_BREAK_CLOCKS);
+ echo_delay(master);
+ sda_out(master, 1);
+ clock_toggle(master, FSI_POST_BREAK_CLOCKS);
+
+ /* Wait for logic reset to take effect */
+ udelay(200);
+
+ return 0;
+}
+
+static void fsi_master_gpio_init(struct fsi_master_gpio *master)
+{
+ gpiod_direction_output(master->gpio_mux, 1);
+ gpiod_direction_output(master->gpio_trans, 1);
+ gpiod_direction_output(master->gpio_enable, 1);
+ gpiod_direction_output(master->gpio_clk, 1);
+ gpiod_direction_output(master->gpio_data, 1);
+
+ /* todo: evaluate if clocks can be reduced */
+ clock_zeros(master, FSI_INIT_CLOCKS);
+}
+
+static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+ if (link != 0)
+ return -ENODEV;
+ gpiod_set_value(master->gpio_enable, 1);
+
+ return 0;
+}
+
+static int fsi_master_gpio_probe(struct platform_device *pdev)
+{
+ struct fsi_master_gpio *master;
+ struct gpio_desc *gpio;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = &pdev->dev;
+ master->master.dev.parent = master->dev;
+
+ gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get clock gpio\n");
+ return PTR_ERR(gpio);
+ }
+ master->gpio_clk = gpio;
+
+ gpio = devm_gpiod_get(&pdev->dev, "data", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get data gpio\n");
+ return PTR_ERR(gpio);
+ }
+ master->gpio_data = gpio;
+
+ /* Optional GPIOs */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get trans gpio\n");
+ return PTR_ERR(gpio);
+ }
+ master->gpio_trans = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get enable gpio\n");
+ return PTR_ERR(gpio);
+ }
+ master->gpio_enable = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get mux gpio\n");
+ return PTR_ERR(gpio);
+ }
+ master->gpio_mux = gpio;
+
+ master->master.n_links = 1;
+ master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
+ master->master.read = fsi_master_gpio_read;
+ master->master.write = fsi_master_gpio_write;
+ master->master.term = fsi_master_gpio_term;
+ master->master.send_break = fsi_master_gpio_break;
+ master->master.link_enable = fsi_master_gpio_link_enable;
+ platform_set_drvdata(pdev, master);
+ spin_lock_init(&master->cmd_lock);
+
+ fsi_master_gpio_init(master);
+
+ return fsi_master_register(&master->master);
+}
+
+
+static int fsi_master_gpio_remove(struct platform_device *pdev)
+{
+ struct fsi_master_gpio *master = platform_get_drvdata(pdev);
+
+ devm_gpiod_put(&pdev->dev, master->gpio_clk);
+ devm_gpiod_put(&pdev->dev, master->gpio_data);
+ if (master->gpio_trans)
+ devm_gpiod_put(&pdev->dev, master->gpio_trans);
+ if (master->gpio_enable)
+ devm_gpiod_put(&pdev->dev, master->gpio_enable);
+ if (master->gpio_mux)
+ devm_gpiod_put(&pdev->dev, master->gpio_mux);
+ fsi_master_unregister(&master->master);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_gpio_match[] = {
+ { .compatible = "fsi-master-gpio" },
+ { },
+};
+
+static struct platform_driver fsi_master_gpio_driver = {
+ .driver = {
+ .name = "fsi-master-gpio",
+ .of_match_table = fsi_master_gpio_match,
+ },
+ .probe = fsi_master_gpio_probe,
+ .remove = fsi_master_gpio_remove,
+};
+
+module_platform_driver(fsi_master_gpio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
new file mode 100644
index 000000000000..133b9bff1d65
--- /dev/null
+++ b/drivers/fsi/fsi-master-hub.c
@@ -0,0 +1,327 @@
+/*
+ * FSI hub master driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/fsi.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "fsi-master.h"
+
+/* Control Registers */
+#define FSI_MMODE 0x0 /* R/W: mode */
+#define FSI_MDLYR 0x4 /* R/W: delay */
+#define FSI_MCRSP 0x8 /* R/W: clock rate */
+#define FSI_MENP0 0x10 /* R/W: enable */
+#define FSI_MLEVP0 0x18 /* R: plug detect */
+#define FSI_MSENP0 0x18 /* S: Set enable */
+#define FSI_MCENP0 0x20 /* C: Clear enable */
+#define FSI_MAEB 0x70 /* R: Error address */
+#define FSI_MVER 0x74 /* R: master version/type */
+#define FSI_MRESP0 0xd0 /* W: Port reset */
+#define FSI_MESRB0 0x1d0 /* R: Master error status */
+#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
+#define FSI_MECTRL 0x2e0 /* W: Error control */
+
+/* MMODE: Mode control */
+#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
+#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
+#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
+#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
+ /* MSB=1, LSB=0 is 0.8 ms */
+ /* MSB=0, LSB=1 is 0.9 ms */
+#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
+#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
+#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
+#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
+
+/* MRESB: Reset bridge */
+#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
+#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
+
+/* MRESP: Reset port */
+#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
+#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
+#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
+#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
+#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
+
+/* MECTRL: Error control */
+#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
+ /* master 0 in error */
+#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
+
+#define FSI_ENGID_HUB_MASTER 0x1c
+#define FSI_HUB_LINK_OFFSET 0x80000
+#define FSI_HUB_LINK_SIZE 0x80000
+#define FSI_HUB_MASTER_MAX_LINKS 8
+
+#define	FSI_LINK_ENABLE_SETUP_TIME	10	/* in ms */
+
+/*
+ * FSI hub master support
+ *
+ * A hub master increases the number of potential target devices that the
+ * primary FSI master can access. For each link a primary master supports,
+ * each of those links can in turn be chained to a hub master with multiple
+ * links of its own.
+ *
+ * The hub is controlled by a set of control registers exposed as a regular fsi
+ * device (the hub->upstream device), and provides access to the downstream FSI
+ * bus through an address range on the slave itself (->addr and ->size).
+ *
+ * [This differs from "cascaded" masters, which expose the entire downstream
+ * bus entirely through the fsi device address range, and so have a smaller
+ * accessible address space.]
+ */
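+/*
+ * Address mapping sketch (mirroring hub_master_read/write below): an access
+ * to address A on downstream link L is forwarded to the upstream slave at
+ * hub->addr + L * FSI_HUB_LINK_SIZE + A.
+ */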
+struct fsi_master_hub {
+ struct fsi_master master;
+ struct fsi_device *upstream;
+ uint32_t addr, size; /* slave-relative addr of */
+ /* master address space */
+};
+
+#define to_fsi_master_hub(m) container_of(m, struct fsi_master_hub, master)
+
+static int hub_master_read(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, void *val, size_t size)
+{
+ struct fsi_master_hub *hub = to_fsi_master_hub(master);
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
+ return fsi_slave_read(hub->upstream->slave, addr, val, size);
+}
+
+static int hub_master_write(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+ struct fsi_master_hub *hub = to_fsi_master_hub(master);
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
+ return fsi_slave_write(hub->upstream->slave, addr, val, size);
+}
+
+static int hub_master_break(struct fsi_master *master, int link)
+{
+ uint32_t addr, cmd;
+
+ addr = 0x4;
+ cmd = cpu_to_be32(0xc0de0000);
+
+ return hub_master_write(master, link, 0, addr, &cmd, sizeof(cmd));
+}
+
+static int hub_master_link_enable(struct fsi_master *master, int link)
+{
+ struct fsi_master_hub *hub = to_fsi_master_hub(master);
+ int idx, bit;
+ __be32 reg;
+ int rc;
+
+ idx = link / 32;
+ bit = link % 32;
+
+ reg = cpu_to_be32(0x80000000 >> bit);
+
+ rc = fsi_device_write(hub->upstream, FSI_MSENP0 + (4 * idx), &reg, 4);
+
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ fsi_device_read(hub->upstream, FSI_MENP0 + (4 * idx), &reg, 4);
+
+ return rc;
+}
+
+static void hub_master_release(struct device *dev)
+{
+ struct fsi_master_hub *hub = to_fsi_master_hub(dev_to_fsi_master(dev));
+
+ kfree(hub);
+}
+
+/* mmode encoders */
+static inline u32 fsi_mmode_crs0(u32 x)
+{
+ return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
+}
+
+static inline u32 fsi_mmode_crs1(u32 x)
+{
+ return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
+}
+
+static int hub_master_init(struct fsi_master_hub *hub)
+{
+ struct fsi_device *dev = hub->upstream;
+ __be32 reg;
+ int rc;
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ /* Initialize the MFSI (hub master) engine */
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
+ rc = fsi_device_write(dev, FSI_MECTRL, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ reg = cpu_to_be32(FSI_MMODE_EIP | FSI_MMODE_ECRC | FSI_MMODE_EPC
+ | fsi_mmode_crs0(1) | fsi_mmode_crs1(1)
+ | FSI_MMODE_P8_TO_LSB);
+ rc = fsi_device_write(dev, FSI_MMODE, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ reg = cpu_to_be32(0xffff0000);
+ rc = fsi_device_write(dev, FSI_MDLYR, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ reg = ~0;
+ rc = fsi_device_write(dev, FSI_MSENP0, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ /* Leave enabled long enough for master logic to set up */
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ rc = fsi_device_write(dev, FSI_MCENP0, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ rc = fsi_device_read(dev, FSI_MAEB, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
+ rc = fsi_device_write(dev, FSI_MRESP0, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ rc = fsi_device_read(dev, FSI_MLEVP0, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ /* Reset the master bridge */
+ reg = cpu_to_be32(FSI_MRESB_RST_GEN);
+ rc = fsi_device_write(dev, FSI_MRESB0, &reg, sizeof(reg));
+ if (rc)
+ return rc;
+
+ reg = cpu_to_be32(FSI_MRESB_RST_ERR);
+ return fsi_device_write(dev, FSI_MRESB0, &reg, sizeof(reg));
+}
+
+static int hub_master_probe(struct device *dev)
+{
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct fsi_master_hub *hub;
+ uint32_t reg, links;
+ __be32 __reg;
+ int rc;
+
+ rc = fsi_device_read(fsi_dev, FSI_MVER, &__reg, sizeof(__reg));
+ if (rc)
+ return rc;
+
+ reg = be32_to_cpu(__reg);
+ links = (reg >> 8) & 0xff;
+ dev_info(dev, "hub version %08x (%d links)\n", reg, links);
+
+ rc = fsi_slave_claim_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
+ FSI_HUB_LINK_SIZE * links);
+ if (rc) {
+ dev_err(dev, "can't claim slave address range for links");
+ return rc;
+ }
+
+ hub = kzalloc(sizeof(*hub), GFP_KERNEL);
+ if (!hub) {
+ rc = -ENOMEM;
+ goto err_release;
+ }
+
+ hub->addr = FSI_HUB_LINK_OFFSET;
+ hub->size = FSI_HUB_LINK_SIZE * links;
+ hub->upstream = fsi_dev;
+
+ hub->master.dev.parent = dev;
+ hub->master.dev.release = hub_master_release;
+
+ hub->master.n_links = links;
+ hub->master.read = hub_master_read;
+ hub->master.write = hub_master_write;
+ hub->master.send_break = hub_master_break;
+ hub->master.link_enable = hub_master_link_enable;
+
+ dev_set_drvdata(dev, hub);
+
+ hub_master_init(hub);
+
+ rc = fsi_master_register(&hub->master);
+ if (!rc)
+ return 0;
+
+ kfree(hub);
+err_release:
+ fsi_slave_release_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
+ FSI_HUB_LINK_SIZE * links);
+ return rc;
+}
+
+static int hub_master_remove(struct device *dev)
+{
+ struct fsi_master_hub *hub = dev_get_drvdata(dev);
+
+ fsi_master_unregister(&hub->master);
+ fsi_slave_release_range(hub->upstream->slave, hub->addr, hub->size);
+ return 0;
+}
+
+static struct fsi_device_id hub_master_ids[] = {
+ {
+ .engine_type = FSI_ENGID_HUB_MASTER,
+ .version = FSI_VERSION_ANY,
+ },
+ { 0 }
+};
+
+static struct fsi_driver hub_master_driver = {
+ .id_table = hub_master_ids,
+ .drv = {
+ .name = "fsi-master-hub",
+ .bus = &fsi_bus_type,
+ .probe = hub_master_probe,
+ .remove = hub_master_remove,
+ }
+};
+
+module_fsi_driver(hub_master_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
new file mode 100644
index 000000000000..12f7b119567d
--- /dev/null
+++ b/drivers/fsi/fsi-master.h
@@ -0,0 +1,43 @@
+/*
+ * FSI master definitions. These comprise the core <--> master interface,
+ * to allow the core to interact with the (hardware-specific) masters.
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DRIVERS_FSI_MASTER_H
+#define DRIVERS_FSI_MASTER_H
+
+#include <linux/device.h>
+
+#define FSI_MASTER_FLAG_SWCLOCK 0x1
+
+struct fsi_master {
+ struct device dev;
+ int idx;
+ int n_links;
+ int flags;
+ int (*read)(struct fsi_master *, int link, uint8_t id,
+ uint32_t addr, void *val, size_t size);
+ int (*write)(struct fsi_master *, int link, uint8_t id,
+ uint32_t addr, const void *val, size_t size);
+ int (*term)(struct fsi_master *, int link, uint8_t id);
+ int (*send_break)(struct fsi_master *, int link);
+ int (*link_enable)(struct fsi_master *, int link);
+};
+
+#define dev_to_fsi_master(d) container_of(d, struct fsi_master, dev)
+
+extern int fsi_master_register(struct fsi_master *master);
+extern void fsi_master_unregister(struct fsi_master *master);
+
+#endif /* DRIVERS_FSI_MASTER_H */
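
To illustrate the contract this header defines: a minimal master backend fills in the transfer hooks and hands the structure to the core. The sketch below is hypothetical (the my_master_* names are illustrative); device release and real hardware access are elided:

static int my_master_read(struct fsi_master *master, int link, uint8_t id,
			  uint32_t addr, void *val, size_t size)
{
	/* hardware-specific read of "size" bytes at "addr" on "link" */
	return 0;
}

static int my_master_write(struct fsi_master *master, int link, uint8_t id,
			   uint32_t addr, const void *val, size_t size)
{
	/* hardware-specific write of "size" bytes at "addr" on "link" */
	return 0;
}

static int my_master_init(struct device *parent)
{
	struct fsi_master *master;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->dev.parent = parent;
	master->n_links = 1;
	master->read = my_master_read;
	master->write = my_master_write;

	/* the core takes over scanning the links for slaves from here */
	return fsi_master_register(master);
}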
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
new file mode 100644
index 000000000000..98d062fd353e
--- /dev/null
+++ b/drivers/fsi/fsi-scom.c
@@ -0,0 +1,263 @@
+/*
+ * SCOM FSI Client device driver
+ *
+ * Copyright (C) IBM Corporation 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fsi.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+
+#define FSI_ENGID_SCOM 0x5
+
+#define SCOM_FSI2PIB_DELAY 50
+
+/* SCOM engine register set */
+#define SCOM_DATA0_REG 0x00
+#define SCOM_DATA1_REG 0x04
+#define SCOM_CMD_REG 0x08
+#define SCOM_RESET_REG 0x1C
+
+#define SCOM_RESET_CMD 0x80000000
+#define SCOM_WRITE_CMD 0x80000000
+
+struct scom_device {
+ struct list_head link;
+ struct fsi_device *fsi_dev;
+ struct miscdevice mdev;
+ char name[32];
+ int idx;
+};
+
+#define to_scom_dev(x) container_of((x), struct scom_device, mdev)
+
+static struct list_head scom_devices;
+
+static DEFINE_IDA(scom_ida);
+
+static int put_scom(struct scom_device *scom_dev, uint64_t value,
+ uint32_t addr)
+{
+ int rc;
+ uint32_t data;
+
+ data = cpu_to_be32(SCOM_RESET_CMD);
+ rc = fsi_device_write(scom_dev->fsi_dev, SCOM_RESET_REG, &data,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+
+ data = cpu_to_be32((value >> 32) & 0xffffffff);
+ rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+
+ data = cpu_to_be32(value & 0xffffffff);
+ rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+
+ data = cpu_to_be32(SCOM_WRITE_CMD | addr);
+ return fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+ sizeof(uint32_t));
+}
+
+static int get_scom(struct scom_device *scom_dev, uint64_t *value,
+ uint32_t addr)
+{
+ uint32_t result, data;
+ int rc;
+
+ *value = 0ULL;
+ data = cpu_to_be32(addr);
+ rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &result,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+
+ *value |= (uint64_t)cpu_to_be32(result) << 32;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &result,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+
+ *value |= cpu_to_be32(result);
+
+ return 0;
+}
+
+static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
+ loff_t *offset)
+{
+ int rc;
+ struct miscdevice *mdev =
+ (struct miscdevice *)filep->private_data;
+ struct scom_device *scom = to_scom_dev(mdev);
+ struct device *dev = &scom->fsi_dev->dev;
+ uint64_t val;
+
+ if (len != sizeof(uint64_t))
+ return -EINVAL;
+
+ rc = get_scom(scom, &val, *offset);
+ if (rc) {
+ dev_dbg(dev, "get_scom fail:%d\n", rc);
+ return rc;
+ }
+
+ rc = copy_to_user(buf, &val, len);
+ if (rc)
+ dev_dbg(dev, "copy to user failed:%d\n", rc);
+
+ return rc ? rc : len;
+}
+
+static ssize_t scom_write(struct file *filep, const char __user *buf,
+ size_t len, loff_t *offset)
+{
+ int rc;
+ struct miscdevice *mdev = filep->private_data;
+ struct scom_device *scom = to_scom_dev(mdev);
+ struct device *dev = &scom->fsi_dev->dev;
+ uint64_t val;
+
+ if (len != sizeof(uint64_t))
+ return -EINVAL;
+
+ rc = copy_from_user(&val, buf, len);
+ if (rc) {
+ dev_dbg(dev, "copy from user failed:%d\n", rc);
+ return -EINVAL;
+ }
+
+ rc = put_scom(scom, val, *offset);
+ if (rc) {
+ dev_dbg(dev, "put_scom failed with:%d\n", rc);
+ return rc;
+ }
+
+ return len;
+}
+
+static loff_t scom_llseek(struct file *file, loff_t offset, int whence)
+{
+ switch (whence) {
+ case SEEK_CUR:
+ break;
+ case SEEK_SET:
+ file->f_pos = offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return offset;
+}
+
+static const struct file_operations scom_fops = {
+ .owner = THIS_MODULE,
+ .llseek = scom_llseek,
+ .read = scom_read,
+ .write = scom_write,
+};
+
+static int scom_probe(struct device *dev)
+{
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct scom_device *scom;
+
+ scom = devm_kzalloc(dev, sizeof(*scom), GFP_KERNEL);
+ if (!scom)
+ return -ENOMEM;
+
+ scom->idx = ida_simple_get(&scom_ida, 1, INT_MAX, GFP_KERNEL);
+ snprintf(scom->name, sizeof(scom->name), "scom%d", scom->idx);
+ scom->fsi_dev = fsi_dev;
+ scom->mdev.minor = MISC_DYNAMIC_MINOR;
+ scom->mdev.fops = &scom_fops;
+ scom->mdev.name = scom->name;
+ scom->mdev.parent = dev;
+ list_add(&scom->link, &scom_devices);
+
+ return misc_register(&scom->mdev);
+}
+
+static int scom_remove(struct device *dev)
+{
+ struct scom_device *scom, *scom_tmp;
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+
+ list_for_each_entry_safe(scom, scom_tmp, &scom_devices, link) {
+ if (scom->fsi_dev == fsi_dev) {
+ list_del(&scom->link);
+ ida_simple_remove(&scom_ida, scom->idx);
+ misc_deregister(&scom->mdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct fsi_device_id scom_ids[] = {
+ {
+ .engine_type = FSI_ENGID_SCOM,
+ .version = FSI_VERSION_ANY,
+ },
+ { 0 }
+};
+
+static struct fsi_driver scom_drv = {
+ .id_table = scom_ids,
+ .drv = {
+ .name = "scom",
+ .bus = &fsi_bus_type,
+ .probe = scom_probe,
+ .remove = scom_remove,
+ }
+};
+
+static int scom_init(void)
+{
+ INIT_LIST_HEAD(&scom_devices);
+ return fsi_driver_register(&scom_drv);
+}
+
+static void scom_exit(void)
+{
+ struct list_head *pos;
+ struct scom_device *scom;
+
+ list_for_each(pos, &scom_devices) {
+ scom = list_entry(pos, struct scom_device, link);
+ misc_deregister(&scom->mdev);
+ devm_kfree(&scom->fsi_dev->dev, scom);
+ }
+ fsi_driver_unregister(&scom_drv);
+}
+
+module_init(scom_init);
+module_exit(scom_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 736ac76d2a6a..e9bf0bb87ac4 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -630,9 +630,13 @@ void vmbus_close(struct vmbus_channel *channel)
*/
list_for_each_safe(cur, tmp, &channel->sc_list) {
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
- if (cur_channel->state != CHANNEL_OPENED_STATE)
- continue;
vmbus_close_internal(cur_channel);
+ if (cur_channel->rescind) {
+ mutex_lock(&vmbus_connection.channel_mutex);
+ hv_process_channel_removal(cur_channel,
+ cur_channel->offermsg.child_relid);
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ }
}
/*
* Now close the primary.
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 735f9363f2e4..0fabd410efd9 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -428,7 +428,6 @@ void vmbus_free_channels(void)
{
struct vmbus_channel *channel, *tmp;
- mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
listentry) {
/* hv_process_channel_removal() needs this */
@@ -436,7 +435,6 @@ void vmbus_free_channels(void)
vmbus_device_unregister(channel->device_obj);
}
- mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
@@ -483,8 +481,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
list_add_tail(&newchannel->sc_list, &channel->sc_list);
channel->num_sc++;
spin_unlock_irqrestore(&channel->lock, flags);
- } else
+ } else {
+ atomic_dec(&vmbus_connection.offer_in_progress);
goto err_free_chan;
+ }
}
dev_type = hv_get_dev_type(newchannel);
@@ -511,6 +511,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!fnew) {
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
+ atomic_dec(&vmbus_connection.offer_in_progress);
return;
}
@@ -532,9 +533,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
* binding which eventually invokes the device driver's AddDevice()
* method.
*/
- mutex_lock(&vmbus_connection.channel_mutex);
ret = vmbus_device_register(newchannel->device_obj);
- mutex_unlock(&vmbus_connection.channel_mutex);
if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
@@ -542,6 +541,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
kfree(newchannel->device_obj);
goto err_deq_chan;
}
+
+ atomic_dec(&vmbus_connection.offer_in_progress);
return;
err_deq_chan:
@@ -797,6 +798,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
newchannel = alloc_channel();
if (!newchannel) {
vmbus_release_relid(offer->child_relid);
+ atomic_dec(&vmbus_connection.offer_in_progress);
pr_err("Unable to allocate channel object\n");
return;
}
@@ -843,16 +845,38 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
rescind = (struct vmbus_channel_rescind_offer *)hdr;
+ /*
+ * The offer msg and the corresponding rescind msg
+	 * from the host are guaranteed to be ordered -
+ * offer comes in first and then the rescind.
+ * Since we process these events in work elements,
+ * and with preemption, we may end up processing
+ * the events out of order. Given that we handle these
+ * work elements on the same CPU, this is possible only
+ * in the case of preemption. In any case wait here
+ * until the offer processing has moved beyond the
+ * point where the channel is discoverable.
+ */
+
+ while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
+		/*
+		 * Wait here while a channel offer is still being
+		 * processed.
+		 */
+ msleep(1);
+ }
+
mutex_lock(&vmbus_connection.channel_mutex);
channel = relid2channel(rescind->child_relid);
+ mutex_unlock(&vmbus_connection.channel_mutex);
if (channel == NULL) {
/*
- * This is very impossible, because in
- * vmbus_process_offer(), we have already invoked
- * vmbus_release_relid() on error.
+ * We failed in processing the offer message;
+ * we would have cleaned up the relid in that
+ * failure path.
*/
- goto out;
+ return;
}
spin_lock_irqsave(&channel->lock, flags);
@@ -864,7 +888,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
- goto out;
+ return;
}
/*
* We will have to unregister this device from the
@@ -875,13 +899,26 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
- } else {
- hv_process_channel_removal(channel,
- channel->offermsg.child_relid);
}
-
-out:
- mutex_unlock(&vmbus_connection.channel_mutex);
+ if (channel->primary_channel != NULL) {
+ /*
+ * Sub-channel is being rescinded. Following is the channel
+		 * close sequence when initiated from the driver (refer to
+ * vmbus_close() for details):
+ * 1. Close all sub-channels first
+ * 2. Then close the primary channel.
+ */
+ if (channel->state == CHANNEL_OPEN_STATE) {
+ /*
+ * The channel is currently not open;
+ * it is safe for us to cleanup the channel.
+ */
+ mutex_lock(&vmbus_connection.channel_mutex);
+ hv_process_channel_removal(channel,
+ channel->offermsg.child_relid);
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ }
+ }
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index fce27fb141cc..59c11ff90d12 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -93,10 +93,13 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* all the CPUs. This is needed for kexec to work correctly where
* the CPU attempting to connect may not be CPU 0.
*/
- if (version >= VERSION_WIN8_1)
+ if (version >= VERSION_WIN8_1) {
msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
- else
+ vmbus_connection.connect_cpu = smp_processor_id();
+ } else {
msg->target_vcpu = 0;
+ vmbus_connection.connect_cpu = 0;
+ }
/*
* Add to list before we send the request since we may
@@ -370,7 +373,7 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
break;
case HV_STATUS_INSUFFICIENT_MEMORY:
case HV_STATUS_INSUFFICIENT_BUFFERS:
- ret = -ENOMEM;
+ ret = -ENOBUFS;
break;
case HV_STATUS_SUCCESS:
return ret;
@@ -387,7 +390,7 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
else
mdelay(usec / 1000);
- if (usec < 256000)
+ if (retries < 22)
usec *= 2;
}
return ret;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 12e7baecb84e..2ea12207caa0 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -82,10 +82,15 @@ int hv_post_message(union hv_connection_id connection_id,
aligned_msg->message_type = message_type;
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- put_cpu_ptr(hv_cpu);
status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
+ /* Preemption must remain disabled until after the hypercall
+ * so some other thread can't get scheduled onto this cpu and
+ * corrupt the per-cpu post_msg_page
+ */
+ put_cpu_ptr(hv_cpu);
+
return status & 0xFFFF;
}
@@ -96,7 +101,7 @@ static int hv_ce_set_next_event(unsigned long delta,
WARN_ON(!clockevent_state_oneshot(evt));
- hv_get_current_tick(current_tick);
+ current_tick = hyperv_cs->read(NULL);
current_tick += delta;
hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
return 0;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index e99ff2ddad40..9a90b915b5be 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -112,7 +112,7 @@ static void kvp_poll_wrapper(void *channel)
{
/* Transaction is finished, reset the state here to avoid races. */
kvp_transaction.state = HVUTIL_READY;
- hv_kvp_onchannelcallback(channel);
+ tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}
static void kvp_register_done(void)
@@ -159,7 +159,7 @@ static void kvp_timeout_func(struct work_struct *dummy)
static void kvp_host_handshake_func(struct work_struct *dummy)
{
- hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
+ tasklet_schedule(&kvp_transaction.recv_channel->callback_event);
}
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
@@ -625,16 +625,17 @@ void hv_kvp_onchannelcallback(void *context)
NEGO_IN_PROGRESS,
NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
- if (host_negotiatied == NEGO_NOT_STARTED &&
- kvp_transaction.state < HVUTIL_READY) {
+ if (kvp_transaction.state < HVUTIL_READY) {
/*
* If userspace daemon is not connected and host is asking
* us to negotiate we need to delay to not lose messages.
* This is important for Failover IP setting.
*/
- host_negotiatied = NEGO_IN_PROGRESS;
- schedule_delayed_work(&kvp_host_handshake_work,
+ if (host_negotiatied == NEGO_NOT_STARTED) {
+ host_negotiatied = NEGO_IN_PROGRESS;
+ schedule_delayed_work(&kvp_host_handshake_work,
HV_UTIL_NEGO_TIMEOUT * HZ);
+ }
return;
}
if (kvp_transaction.state > HVUTIL_READY)
@@ -702,6 +703,7 @@ void hv_kvp_onchannelcallback(void *context)
VM_PKT_DATA_INBAND, 0);
host_negotiatied = NEGO_FINISHED;
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 186b10083c55..14dce25c104f 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -202,27 +202,39 @@ static void shutdown_onchannelcallback(void *context)
/*
* Set the host time in a process context.
*/
+static struct work_struct adj_time_work;
-struct adj_time_work {
- struct work_struct work;
- u64 host_time;
- u64 ref_time;
- u8 flags;
-};
+/*
+ * The last time sample, received from the host. The PTP device responds to
+ * requests by using this data and the current partition-wide time reference
+ * count.
+ */
+static struct {
+ u64 host_time;
+ u64 ref_time;
+ spinlock_t lock;
+} host_ts;
-static void hv_set_host_time(struct work_struct *work)
+static struct timespec64 hv_get_adj_host_time(void)
{
- struct adj_time_work *wrk;
- struct timespec64 host_ts;
- u64 reftime, newtime;
-
- wrk = container_of(work, struct adj_time_work, work);
+ struct timespec64 ts;
+ u64 newtime, reftime;
+ unsigned long flags;
+ spin_lock_irqsave(&host_ts.lock, flags);
reftime = hyperv_cs->read(hyperv_cs);
- newtime = wrk->host_time + (reftime - wrk->ref_time);
- host_ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+ newtime = host_ts.host_time + (reftime - host_ts.ref_time);
+ ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
+ spin_unlock_irqrestore(&host_ts.lock, flags);
- do_settimeofday64(&host_ts);
+ return ts;
+}
+
+static void hv_set_host_time(struct work_struct *work)
+{
+ struct timespec64 ts = hv_get_adj_host_time();
+
+ do_settimeofday64(&ts);
}
/*
@@ -238,62 +250,35 @@ static void hv_set_host_time(struct work_struct *work)
* typically used as a hint to the guest. The guest is under no obligation
* to discipline the clock.
*/
-static struct adj_time_work wrk;
-
-/*
- * The last time sample, received from the host. PTP device responds to
- * requests by using this data and the current partition-wide time reference
- * count.
- */
-static struct {
- u64 host_time;
- u64 ref_time;
- struct system_time_snapshot snap;
- spinlock_t lock;
-} host_ts;
-
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
unsigned long flags;
u64 cur_reftime;
/*
- * This check is safe since we are executing in the
- * interrupt context and time synch messages are always
- * delivered on the same CPU.
+ * Save the adjusted time sample from the host and the snapshot
+ * of the current system time.
*/
- if (adj_flags & ICTIMESYNCFLAG_SYNC) {
- /* Queue a job to do do_settimeofday64() */
- if (work_pending(&wrk.work))
- return;
-
- wrk.host_time = hosttime;
- wrk.ref_time = reftime;
- wrk.flags = adj_flags;
- schedule_work(&wrk.work);
- } else {
- /*
- * Save the adjusted time sample from the host and the snapshot
- * of the current system time for PTP device.
- */
- spin_lock_irqsave(&host_ts.lock, flags);
-
- cur_reftime = hyperv_cs->read(hyperv_cs);
- host_ts.host_time = hosttime;
- host_ts.ref_time = cur_reftime;
- ktime_get_snapshot(&host_ts.snap);
-
- /*
- * TimeSync v4 messages contain reference time (guest's Hyper-V
- * clocksource read when the time sample was generated), we can
- * improve the precision by adding the delta between now and the
- * time of generation.
- */
- if (ts_srv_version > TS_VERSION_3)
- host_ts.host_time += (cur_reftime - reftime);
-
- spin_unlock_irqrestore(&host_ts.lock, flags);
- }
+ spin_lock_irqsave(&host_ts.lock, flags);
+
+ cur_reftime = hyperv_cs->read(hyperv_cs);
+ host_ts.host_time = hosttime;
+ host_ts.ref_time = cur_reftime;
+
+ /*
+ * TimeSync v4 messages contain reference time (guest's Hyper-V
+	 * clocksource read when the time sample was generated); we can
+ * improve the precision by adding the delta between now and the
+ * time of generation. For older protocols we set
+ * reftime == cur_reftime on call.
+ */
+ host_ts.host_time += (cur_reftime - reftime);
+
+ spin_unlock_irqrestore(&host_ts.lock, flags);
+
+ /* Schedule work to do do_settimeofday64() */
+ if (adj_flags & ICTIMESYNCFLAG_SYNC)
+ schedule_work(&adj_time_work);
}
/*
@@ -341,8 +326,8 @@ static void timesync_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
adj_guesttime(timedatap->parenttime,
- 0,
- timedatap->flags);
+ hyperv_cs->read(hyperv_cs),
+ timedatap->flags);
}
}
@@ -526,58 +511,17 @@ static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
- unsigned long flags;
- u64 newtime, reftime;
-
- spin_lock_irqsave(&host_ts.lock, flags);
- reftime = hyperv_cs->read(hyperv_cs);
- newtime = host_ts.host_time + (reftime - host_ts.ref_time);
- *ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
- spin_unlock_irqrestore(&host_ts.lock, flags);
+ *ts = hv_get_adj_host_time();
return 0;
}
-static int hv_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
-{
- system->cs = hyperv_cs;
- system->cycles = host_ts.ref_time;
- *device = ns_to_ktime((host_ts.host_time - WLTIMEDELTA) * 100);
-
- return 0;
-}
-
-static int hv_ptp_getcrosststamp(struct ptp_clock_info *ptp,
- struct system_device_crosststamp *xtstamp)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&host_ts.lock, flags);
-
- /*
- * host_ts contains the last time sample from the host and the snapshot
- * of system time. We don't need to calculate the time delta between
- * the reception and now as get_device_system_crosststamp() does the
- * required interpolation.
- */
- ret = get_device_system_crosststamp(hv_ptp_get_syncdevicetime,
- NULL, &host_ts.snap, xtstamp);
-
- spin_unlock_irqrestore(&host_ts.lock, flags);
-
- return ret;
-}
-
static struct ptp_clock_info ptp_hyperv_info = {
.name = "hyperv",
.enable = hv_ptp_enable,
.adjtime = hv_ptp_adjtime,
.adjfreq = hv_ptp_adjfreq,
.gettime64 = hv_ptp_gettime,
- .getcrosststamp = hv_ptp_getcrosststamp,
.settime64 = hv_ptp_settime,
.owner = THIS_MODULE,
};
@@ -592,7 +536,7 @@ static int hv_timesync_init(struct hv_util_service *srv)
spin_lock_init(&host_ts.lock);
- INIT_WORK(&wrk.work, hv_set_host_time);
+ INIT_WORK(&adj_time_work, hv_set_host_time);
/*
* ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
@@ -613,7 +557,7 @@ static void hv_timesync_deinit(void)
{
if (hv_ptp_clock)
ptp_clock_unregister(hv_ptp_clock);
- cancel_work_sync(&wrk.work);
+ cancel_work_sync(&adj_time_work);
}
static int __init init_hyperv_utils(void)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 6113e915c50e..1b6a5e0dfa75 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -303,6 +303,13 @@ enum vmbus_connect_state {
#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT
struct vmbus_connection {
+ /*
+ * CPU on which the initial host contact was made.
+ */
+ int connect_cpu;
+
+ atomic_t offer_in_progress;
+
enum vmbus_connect_state conn_state;
atomic_t next_gpadl_handle;
@@ -411,6 +418,10 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
if (!channel)
return;
+ if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
+ cb(channel);
+ return;
+ }
smp_call_function_single(channel->target_cpu, cb, channel, true);
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0087b49095eb..ed84e96715a0 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -608,40 +608,6 @@ static void vmbus_free_dynids(struct hv_driver *drv)
spin_unlock(&drv->dynids.lock);
}
-/* Parse string of form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3f */
-static int get_uuid_le(const char *str, uuid_le *uu)
-{
- unsigned int b[16];
- int i;
-
- if (strlen(str) < 37)
- return -1;
-
- for (i = 0; i < 36; i++) {
- switch (i) {
- case 8: case 13: case 18: case 23:
- if (str[i] != '-')
- return -1;
- break;
- default:
- if (!isxdigit(str[i]))
- return -1;
- }
- }
-
- /* unparse little endian output byte order */
- if (sscanf(str,
- "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
- &b[3], &b[2], &b[1], &b[0],
- &b[5], &b[4], &b[7], &b[6], &b[8], &b[9],
- &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16)
- return -1;
-
- for (i = 0; i < 16; i++)
- uu->b[i] = b[i];
- return 0;
-}
-
/*
* store_new_id - sysfs frontend to vmbus_add_dynid()
*
@@ -651,11 +617,12 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
size_t count)
{
struct hv_driver *drv = drv_to_hv_drv(driver);
- uuid_le guid = NULL_UUID_LE;
+ uuid_le guid;
ssize_t retval;
- if (get_uuid_le(buf, &guid) != 0)
- return -EINVAL;
+ retval = uuid_le_to_bin(buf, &guid);
+ if (retval)
+ return retval;
if (hv_vmbus_get_id(drv, &guid))
return -EEXIST;
@@ -677,12 +644,14 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
{
struct hv_driver *drv = drv_to_hv_drv(driver);
struct vmbus_dynid *dynid, *n;
- uuid_le guid = NULL_UUID_LE;
- size_t retval = -ENODEV;
+ uuid_le guid;
+ ssize_t retval;
- if (get_uuid_le(buf, &guid))
- return -EINVAL;
+ retval = uuid_le_to_bin(buf, &guid);
+ if (retval)
+ return retval;
+ retval = -ENODEV;
spin_lock(&drv->dynids.lock);
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
struct hv_vmbus_device_id *id = &dynid->id;
@@ -798,8 +767,10 @@ static void vmbus_device_release(struct device *device)
struct hv_device *hv_dev = device_to_hv_device(device);
struct vmbus_channel *channel = hv_dev->channel;
+ mutex_lock(&vmbus_connection.channel_mutex);
hv_process_channel_removal(channel,
channel->offermsg.child_relid);
+ mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev);
}
@@ -877,7 +848,32 @@ void vmbus_on_msg_dpc(unsigned long data)
INIT_WORK(&ctx->work, vmbus_onmessage_work);
memcpy(&ctx->msg, msg, sizeof(*msg));
- queue_work(vmbus_connection.work_queue, &ctx->work);
+ /*
+ * The host can generate a rescind message while we
+ * may still be handling the original offer. We deal with
+ * this condition by ensuring the processing is done on the
+ * same CPU.
+ */
+ switch (hdr->msgtype) {
+ case CHANNELMSG_RESCIND_CHANNELOFFER:
+ /*
+ * If we are handling the rescind message;
+		 * If we are handling the rescind message,
+ */
+ schedule_work_on(vmbus_connection.connect_cpu,
+ &ctx->work);
+ break;
+
+ case CHANNELMSG_OFFERCHANNEL:
+ atomic_inc(&vmbus_connection.offer_in_progress);
+ queue_work_on(vmbus_connection.connect_cpu,
+ vmbus_connection.work_queue,
+ &ctx->work);
+ break;
+
+ default:
+ queue_work(vmbus_connection.work_queue, &ctx->work);
+ }
} else
entry->message_handler(hdr);
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 130cb2114059..8d55d6d79015 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -89,4 +89,18 @@ config CORESIGHT_STM
logging useful software events or data coming from various entities
in the system, possibly running different OSs
+config CORESIGHT_CPU_DEBUG
+ tristate "CoreSight CPU Debug driver"
+ depends on ARM || ARM64
+ depends on DEBUG_FS
+ help
+	  This driver provides support for the CoreSight CPU debug module.
+	  It is primarily used to dump sample-based profiling registers when
+	  the system triggers a panic; the driver parses the context
+	  registers so the program counter (PC), secure state, exception
+	  level, etc. can be determined quickly. Before using the debugging
+	  functionality, the platform needs to ensure the clock domain and
+	  power domain are enabled properly; please refer to
+	  Documentation/trace/coresight-cpu-debug.txt for a detailed
+	  description and usage examples.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index af480d9c1441..433d59025eb6 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
coresight-etm4x-sysfs.o
obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
+obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
new file mode 100644
index 000000000000..64a77e00eaa6
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 2017 Linaro Limited. All rights reserved.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "coresight-priv.h"
+
+#define EDPCSR 0x0A0
+#define EDCIDSR 0x0A4
+#define EDVIDSR 0x0A8
+#define EDPCSR_HI 0x0AC
+#define EDOSLAR 0x300
+#define EDPRCR 0x310
+#define EDPRSR 0x314
+#define EDDEVID1 0xFC4
+#define EDDEVID 0xFC8
+
+#define EDPCSR_PROHIBITED 0xFFFFFFFF
+
+/* bits definition for EDPCSR */
+#define EDPCSR_THUMB BIT(0)
+#define EDPCSR_ARM_INST_MASK GENMASK(31, 2)
+#define EDPCSR_THUMB_INST_MASK GENMASK(31, 1)
+
+/* bits definition for EDPRCR */
+#define EDPRCR_COREPURQ BIT(3)
+#define EDPRCR_CORENPDRQ BIT(0)
+
+/* bits definition for EDPRSR */
+#define EDPRSR_DLK BIT(6)
+#define EDPRSR_PU BIT(0)
+
+/* bits definition for EDVIDSR */
+#define EDVIDSR_NS BIT(31)
+#define EDVIDSR_E2 BIT(30)
+#define EDVIDSR_E3 BIT(29)
+#define EDVIDSR_HV BIT(28)
+#define EDVIDSR_VMID GENMASK(7, 0)
+
+/*
+ * bits definition for EDDEVID1:PCSROffset
+ *
+ * NOTE: armv8 and armv7 have different definitions for this field,
+ * so consolidate the bits definition as below:
+ *
+ * 0b0000 - Sample offset applies based on the instruction state; we
+ *          rely on EDDEVID to check if EDPCSR is implemented or not
+ * 0b0001 - No offset applies.
+ * 0b0010 - No offset applies, but do not use in AArch32 mode
+ *
+ */
+#define EDDEVID1_PCSR_OFFSET_MASK GENMASK(3, 0)
+#define EDDEVID1_PCSR_OFFSET_INS_SET (0x0)
+#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32 (0x2)
+
+/* bits definition for EDDEVID */
+#define EDDEVID_PCSAMPLE_MODE GENMASK(3, 0)
+#define EDDEVID_IMPL_EDPCSR (0x1)
+#define EDDEVID_IMPL_EDPCSR_EDCIDSR (0x2)
+#define EDDEVID_IMPL_FULL (0x3)
+
+#define DEBUG_WAIT_SLEEP 1000
+#define DEBUG_WAIT_TIMEOUT 32000
+
+struct debug_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ int cpu;
+
+ bool edpcsr_present;
+ bool edcidsr_present;
+ bool edvidsr_present;
+ bool pc_has_offset;
+
+ u32 edpcsr;
+ u32 edpcsr_hi;
+ u32 edprsr;
+ u32 edvidsr;
+ u32 edcidsr;
+};
+
+static DEFINE_MUTEX(debug_lock);
+static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
+static int debug_count;
+static struct dentry *debug_debugfs_dir;
+
+static bool debug_enable;
+module_param_named(enable, debug_enable, bool, 0600);
+MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
+
+static void debug_os_unlock(struct debug_drvdata *drvdata)
+{
+ /* Unlocks the debug registers */
+ writel_relaxed(0x0, drvdata->base + EDOSLAR);
+
+ /* Make sure the registers are unlocked before accessing */
+ wmb();
+}
+
+/*
+ * According to ARM DDI 0487A.k, the access permission must be
+ * checked before accessing the external debug registers; if either
+ * of the conditions below is met, the debug registers must not be
+ * accessed, to avoid a lockup:
+ *
+ * - the CPU power domain is powered off;
+ * - the OS Double Lock is locked.
+ *
+ * Checking EDPRSR tells us whether either condition holds.
+ */
+static bool debug_access_permitted(struct debug_drvdata *drvdata)
+{
+ /* CPU is powered off */
+ if (!(drvdata->edprsr & EDPRSR_PU))
+ return false;
+
+ /* The OS Double Lock is locked */
+ if (drvdata->edprsr & EDPRSR_DLK)
+ return false;
+
+ return true;
+}
+
+static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
+{
+ u32 edprcr;
+
+try_again:
+
+ /*
+	 * Send a request to the power management controller and assert
+	 * the DBGPWRUPREQ signal; a sane power management controller
+	 * implementation should then enable the CPU power domain if the
+	 * CPU is in a low power state.
+ */
+ edprcr = readl_relaxed(drvdata->base + EDPRCR);
+ edprcr |= EDPRCR_COREPURQ;
+ writel_relaxed(edprcr, drvdata->base + EDPRCR);
+
+ /* Wait for CPU to be powered up (timeout~=32ms) */
+ if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
+ drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
+ DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
+ /*
+		 * Unfortunately the CPU cannot be powered up, so bail
+		 * out; we will have no permission to access the other
+		 * registers later. In this case, CPU low power states
+		 * should be disabled to keep the CPU power domain on!
+ */
+ dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
+ __func__, drvdata->cpu);
+ return;
+ }
+
+ /*
+ * At this point the CPU is powered up, so set the no powerdown
+ * request bit so we don't lose power and emulate power down.
+ */
+ edprcr = readl_relaxed(drvdata->base + EDPRCR);
+ edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
+ writel_relaxed(edprcr, drvdata->base + EDPRCR);
+
+ drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);
+
+ /* The core power domain got switched off on use, try again */
+ if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
+ goto try_again;
+}
+
+static void debug_read_regs(struct debug_drvdata *drvdata)
+{
+ u32 save_edprcr;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Unlock os lock */
+ debug_os_unlock(drvdata);
+
+ /* Save EDPRCR register */
+ save_edprcr = readl_relaxed(drvdata->base + EDPRCR);
+
+ /*
+	 * Ensure the CPU power domain is enabled so that the
+	 * registers are accessible.
+ */
+ debug_force_cpu_powered_up(drvdata);
+
+ if (!debug_access_permitted(drvdata))
+ goto out;
+
+ drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);
+
+ /*
+ * As described in ARM DDI 0487A.k, if the processing
+ * element (PE) is in debug state, or sample-based
+ * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
+	 * the EDCIDSR, EDVIDSR and EDPCSR_HI registers also take
+	 * UNKNOWN values. So bail out directly in this case.
+ */
+ if (drvdata->edpcsr == EDPCSR_PROHIBITED)
+ goto out;
+
+ /*
+ * A read of the EDPCSR normally has the side-effect of
+ * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
+	 * at this point it's safe to read values from them.
+ */
+ if (IS_ENABLED(CONFIG_64BIT))
+ drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);
+
+ if (drvdata->edcidsr_present)
+ drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);
+
+ if (drvdata->edvidsr_present)
+ drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);
+
+out:
+ /* Restore EDPRCR register */
+ writel_relaxed(save_edprcr, drvdata->base + EDPRCR);
+
+ CS_LOCK(drvdata->base);
+}
+
+#ifdef CONFIG_64BIT
+static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
+{
+ return (unsigned long)drvdata->edpcsr_hi << 32 |
+ (unsigned long)drvdata->edpcsr;
+}
+#else
+static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
+{
+ unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
+ unsigned long pc;
+
+ pc = (unsigned long)drvdata->edpcsr;
+
+ if (drvdata->pc_has_offset) {
+ arm_inst_offset = 8;
+ thumb_inst_offset = 4;
+ }
+
+ /* Handle thumb instruction */
+ if (pc & EDPCSR_THUMB) {
+ pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
+ return pc;
+ }
+
+ /*
+	 * Handle the ARM instruction offset: if the ARM instruction
+	 * is not 4-byte aligned, the offset is implementation
+	 * defined; keep the original value in that case and print a
+	 * notice.
+ */
+ if (pc & BIT(1))
+ dev_emerg(drvdata->dev,
+ "Instruction offset is implementation defined\n");
+ else
+ pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;
+
+ return pc;
+}
+#endif
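
As a worked example of the 32-bit path above (sampled values are illustrative): a Thumb-state EDPCSR of 0x80001005 yields (0x80001005 & EDPCSR_THUMB_INST_MASK) - 4 = 0x80001000 when an offset applies, while an ARM-state sample of 0x80001008 yields 0x80001008 - 8 = 0x80001000.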
+
+static void debug_dump_regs(struct debug_drvdata *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ unsigned long pc;
+
+ dev_emerg(dev, " EDPRSR: %08x (Power:%s DLK:%s)\n",
+ drvdata->edprsr,
+ drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
+ drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");
+
+ if (!debug_access_permitted(drvdata)) {
+ dev_emerg(dev, "No permission to access debug registers!\n");
+ return;
+ }
+
+ if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
+ dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
+ return;
+ }
+
+ pc = debug_adjust_pc(drvdata);
+ dev_emerg(dev, " EDPCSR: [<%p>] %pS\n", (void *)pc, (void *)pc);
+
+ if (drvdata->edcidsr_present)
+ dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
+
+ if (drvdata->edvidsr_present)
+ dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
+ drvdata->edvidsr,
+ drvdata->edvidsr & EDVIDSR_NS ?
+ "Non-secure" : "Secure",
+ drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
+ (drvdata->edvidsr & EDVIDSR_E2 ?
+ "EL2" : "EL1/0"),
+ drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
+ drvdata->edvidsr & (u32)EDVIDSR_VMID);
+}
+
+static void debug_init_arch_data(void *info)
+{
+ struct debug_drvdata *drvdata = info;
+ u32 mode, pcsr_offset;
+ u32 eddevid, eddevid1;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Read device info */
+ eddevid = readl_relaxed(drvdata->base + EDDEVID);
+ eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);
+
+ CS_LOCK(drvdata->base);
+
+ /* Parse implementation feature */
+ mode = eddevid & EDDEVID_PCSAMPLE_MODE;
+ pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;
+
+ drvdata->edpcsr_present = false;
+ drvdata->edcidsr_present = false;
+ drvdata->edvidsr_present = false;
+ drvdata->pc_has_offset = false;
+
+ switch (mode) {
+ case EDDEVID_IMPL_FULL:
+ drvdata->edvidsr_present = true;
+ /* Fall through */
+ case EDDEVID_IMPL_EDPCSR_EDCIDSR:
+ drvdata->edcidsr_present = true;
+ /* Fall through */
+ case EDDEVID_IMPL_EDPCSR:
+ /*
+		 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
+		 * an offset applies to the PC sampling value; if
+		 * EDDEVID1.PCSROffset reads back as 0x2, the debug module
+		 * does not sample the instruction set state when an armv8
+		 * CPU is in AArch32 state.
+ */
+ drvdata->edpcsr_present =
+ ((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
+ (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));
+
+ drvdata->pc_has_offset =
+ (pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Dump out information on panic.
+ */
+static int debug_notifier_call(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ int cpu;
+ struct debug_drvdata *drvdata;
+
+ mutex_lock(&debug_lock);
+
+ /* Bail out if the functionality is disabled */
+ if (!debug_enable)
+ goto skip_dump;
+
+ pr_emerg("ARM external debug module:\n");
+
+ for_each_possible_cpu(cpu) {
+ drvdata = per_cpu(debug_drvdata, cpu);
+ if (!drvdata)
+ continue;
+
+ dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
+
+ debug_read_regs(drvdata);
+ debug_dump_regs(drvdata);
+ }
+
+skip_dump:
+ mutex_unlock(&debug_lock);
+ return 0;
+}
+
+static struct notifier_block debug_notifier = {
+ .notifier_call = debug_notifier_call,
+};
+
+static int debug_enable_func(void)
+{
+ struct debug_drvdata *drvdata;
+ int cpu, ret = 0;
+ cpumask_t mask;
+
+ /*
+ * Use cpumask to track which debug power domains have
+	 * been powered on and use it to handle the failure case.
+ */
+ cpumask_clear(&mask);
+
+ for_each_possible_cpu(cpu) {
+ drvdata = per_cpu(debug_drvdata, cpu);
+ if (!drvdata)
+ continue;
+
+ ret = pm_runtime_get_sync(drvdata->dev);
+ if (ret < 0)
+ goto err;
+ else
+ cpumask_set_cpu(cpu, &mask);
+ }
+
+ return 0;
+
+err:
+ /*
+	 * If pm_runtime_get_sync() has failed, roll back all the
+	 * other CPUs that were enabled before the failure.
+ */
+ for_each_cpu(cpu, &mask) {
+ drvdata = per_cpu(debug_drvdata, cpu);
+ pm_runtime_put_noidle(drvdata->dev);
+ }
+
+ return ret;
+}
+
+static int debug_disable_func(void)
+{
+ struct debug_drvdata *drvdata;
+ int cpu, ret, err = 0;
+
+ /*
+	 * Disable the debug power domains; record any error but keep
+	 * iterating through the remaining CPUs when an error is
+	 * encountered.
+ */
+ for_each_possible_cpu(cpu) {
+ drvdata = per_cpu(debug_drvdata, cpu);
+ if (!drvdata)
+ continue;
+
+ ret = pm_runtime_put(drvdata->dev);
+ if (ret < 0)
+ err = ret;
+ }
+
+ return err;
+}
+
+static ssize_t debug_func_knob_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ u8 val;
+ int ret;
+
+ ret = kstrtou8_from_user(buf, count, 2, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&debug_lock);
+
+ if (val == debug_enable)
+ goto out;
+
+ if (val)
+ ret = debug_enable_func();
+ else
+ ret = debug_disable_func();
+
+ if (ret) {
+ pr_err("%s: unable to %s debug function: %d\n",
+ __func__, val ? "enable" : "disable", ret);
+ goto err;
+ }
+
+ debug_enable = val;
+out:
+ ret = count;
+err:
+ mutex_unlock(&debug_lock);
+ return ret;
+}
+
+static ssize_t debug_func_knob_read(struct file *f,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ char buf[3];
+
+ mutex_lock(&debug_lock);
+ snprintf(buf, sizeof(buf), "%d\n", debug_enable);
+ mutex_unlock(&debug_lock);
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
+ return ret;
+}
+
+static const struct file_operations debug_func_knob_fops = {
+ .open = simple_open,
+ .read = debug_func_knob_read,
+ .write = debug_func_knob_write,
+};
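
With the knob above in place (and assuming debugfs is mounted at the usual /sys/kernel/debug), the dump functionality can be toggled at run time, for example:

	echo 1 > /sys/kernel/debug/coresight_cpu_debug/enable

This has the same effect as setting the module's "enable" parameter at load time.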
+
+static int debug_func_init(void)
+{
+ struct dentry *file;
+ int ret;
+
+ /* Create debugfs node */
+ debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
+ if (!debug_debugfs_dir) {
+ pr_err("%s: unable to create debugfs directory\n", __func__);
+ return -ENOMEM;
+ }
+
+ file = debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
+ &debug_func_knob_fops);
+ if (!file) {
+ pr_err("%s: unable to create enable knob file\n", __func__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Register function to be called for panic */
+ ret = atomic_notifier_chain_register(&panic_notifier_list,
+ &debug_notifier);
+ if (ret) {
+ pr_err("%s: unable to register notifier: %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ debugfs_remove_recursive(debug_debugfs_dir);
+ return ret;
+}
+
+static void debug_func_exit(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &debug_notifier);
+ debugfs_remove_recursive(debug_debugfs_dir);
+}
+
+static int debug_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct debug_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct device_node *np = adev->dev.of_node;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;
+ if (per_cpu(debug_drvdata, drvdata->cpu)) {
+ dev_err(dev, "CPU%d drvdata has already been initialized\n",
+ drvdata->cpu);
+ return -EBUSY;
+ }
+
+ drvdata->dev = &adev->dev;
+ amba_set_drvdata(adev, drvdata);
+
+ /* Validity for the resource is already checked by the AMBA core */
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ get_online_cpus();
+ per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
+ ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
+ drvdata, 1);
+ put_online_cpus();
+
+ if (ret) {
+ dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
+ goto err;
+ }
+
+ if (!drvdata->edpcsr_present) {
+ dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
+ drvdata->cpu);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ if (!debug_count++) {
+ ret = debug_func_init();
+ if (ret)
+ goto err_func_init;
+ }
+
+ mutex_lock(&debug_lock);
+ /* Turn off debug power domain if debugging is disabled */
+ if (!debug_enable)
+ pm_runtime_put(dev);
+ mutex_unlock(&debug_lock);
+
+ dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
+ return 0;
+
+err_func_init:
+ debug_count--;
+err:
+ per_cpu(debug_drvdata, drvdata->cpu) = NULL;
+ return ret;
+}
+
+static int debug_remove(struct amba_device *adev)
+{
+ struct device *dev = &adev->dev;
+ struct debug_drvdata *drvdata = amba_get_drvdata(adev);
+
+ per_cpu(debug_drvdata, drvdata->cpu) = NULL;
+
+ mutex_lock(&debug_lock);
+ /* Turn off debug power domain before rmmod the module */
+ if (debug_enable)
+ pm_runtime_put(dev);
+ mutex_unlock(&debug_lock);
+
+ if (!--debug_count)
+ debug_func_exit();
+
+ return 0;
+}
+
+static struct amba_id debug_ids[] = {
+ { /* Debug for Cortex-A53 */
+ .id = 0x000bbd03,
+ .mask = 0x000fffff,
+ },
+ { /* Debug for Cortex-A57 */
+ .id = 0x000bbd07,
+ .mask = 0x000fffff,
+ },
+ { /* Debug for Cortex-A72 */
+ .id = 0x000bbd08,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver debug_driver = {
+ .drv = {
+ .name = "coresight-cpu-debug",
+ .suppress_bind_attrs = true,
+ },
+ .probe = debug_probe,
+ .remove = debug_remove,
+ .id_table = debug_ids,
+};
+
+module_amba_driver(debug_driver);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 979ea6ec7902..d5b96423e1a5 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -375,7 +375,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
/*
* Entries should be aligned to the frame size. If they are not
- * go back to the last alignement point to give decoding tools a
+ * go back to the last alignment point to give decoding tools a
* chance to fix things.
*/
if (write_ptr % ETB_FRAME_SIZE_WORDS) {
@@ -675,11 +675,8 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->buf = devm_kzalloc(dev,
drvdata->buffer_depth * 4, GFP_KERNEL);
- if (!drvdata->buf) {
- dev_err(dev, "Failed to allocate %u bytes for buffer data\n",
- drvdata->buffer_depth * 4);
+ if (!drvdata->buf)
return -ENOMEM;
- }
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 288a423c1b27..8f546f59a3fd 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -201,6 +201,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
event_data = alloc_event_data(event_cpu);
if (!event_data)
return NULL;
+ INIT_WORK(&event_data->work, free_event_data);
/*
* In theory nothing prevent tracers in a trace session from being
@@ -217,8 +218,6 @@ static void *etm_setup_aux(int event_cpu, void **pages,
if (!sink)
goto err;
- INIT_WORK(&event_data->work, free_event_data);
-
mask = &event_data->mask;
/* Setup the path for each CPU in a trace session */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index aec61a6d5c63..e3b9fb82eb8d 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -166,9 +166,6 @@ out:
if (!used)
kfree(buf);
- if (!ret)
- dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
-
return ret;
}
@@ -204,15 +201,27 @@ out:
static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
+ int ret;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etf_sink_sysfs(csdev);
+ ret = tmc_enable_etf_sink_sysfs(csdev);
+ break;
case CS_MODE_PERF:
- return tmc_enable_etf_sink_perf(csdev);
+ ret = tmc_enable_etf_sink_perf(csdev);
+ break;
+ /* We shouldn't be here */
+ default:
+ ret = -EINVAL;
+ break;
}
- /* We shouldn't be here */
- return -EINVAL;
+ if (ret)
+ return ret;
+
+ dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ return 0;
}
static void tmc_disable_etf_sink(struct coresight_device *csdev)
@@ -273,7 +282,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC disabled\n");
+ dev_info(drvdata->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index d8517d2a968c..864488793f09 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -362,6 +362,13 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etr_cs_ops;
+ /*
+ * ETR configuration uses a 40-bit AXI master in place of
+ * the embedded SRAM of ETB/ETF.
+ */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret)
+ goto out;
} else {
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 0c37356e417c..6a0202b7384f 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -253,14 +253,22 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
return 0;
}
-static void coresight_disable_source(struct coresight_device *csdev)
+/**
+ * coresight_disable_source - Drop the reference count by 1 and disable
+ * the device if there are no users left.
+ *
+ * @csdev: The coresight device to disable
+ *
+ * Returns true if the device has been disabled.
+ */
+static bool coresight_disable_source(struct coresight_device *csdev)
{
if (atomic_dec_return(csdev->refcnt) == 0) {
- if (source_ops(csdev)->disable) {
+ if (source_ops(csdev)->disable)
source_ops(csdev)->disable(csdev, NULL);
- csdev->enable = false;
- }
+ csdev->enable = false;
}
+ return !csdev->enable;
}
void coresight_disable_path(struct list_head *path)
@@ -550,6 +558,9 @@ int coresight_enable(struct coresight_device *csdev)
int cpu, ret = 0;
struct coresight_device *sink;
struct list_head *path;
+ enum coresight_dev_subtype_source subtype;
+
+ subtype = csdev->subtype.source_subtype;
mutex_lock(&coresight_mutex);
@@ -557,8 +568,16 @@ int coresight_enable(struct coresight_device *csdev)
if (ret)
goto out;
- if (csdev->enable)
+ if (csdev->enable) {
+ /*
+ * There could be multiple applications driving the software
+ * source. So keep the refcount for each such user when the
+ * source is already enabled.
+ */
+ if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
+ atomic_inc(csdev->refcnt);
goto out;
+ }
/*
* Search for a valid sink for this session but don't reset the
@@ -585,7 +604,7 @@ int coresight_enable(struct coresight_device *csdev)
if (ret)
goto err_source;
- switch (csdev->subtype.source_subtype) {
+ switch (subtype) {
case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
/*
* When working from sysFS it is important to keep track
@@ -629,7 +648,7 @@ void coresight_disable(struct coresight_device *csdev)
if (ret)
goto out;
- if (!csdev->enable)
+ if (!csdev->enable || !coresight_disable_source(csdev))
goto out;
switch (csdev->subtype.source_subtype) {
@@ -647,7 +666,6 @@ void coresight_disable(struct coresight_device *csdev)
break;
}
- coresight_disable_source(csdev);
coresight_disable_path(path);
coresight_release_path(path);
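An illustration (assumed helper name, not from the patch) of the contract that coresight_disable_source() now encodes: only the last user actually disables the hardware, and the caller releases the path only in that case.

	static bool last_user_disable(struct coresight_device *csdev)
	{
		if (atomic_dec_return(csdev->refcnt))
			return false;			/* other users remain */
		if (source_ops(csdev)->disable)
			source_ops(csdev)->disable(csdev, NULL);
		csdev->enable = false;
		return true;				/* path can be released */
	}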
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 09142e99e915..a18794128bf8 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -52,7 +52,7 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
endpoint, of_dev_node_match);
}
-static void of_coresight_get_ports(struct device_node *node,
+static void of_coresight_get_ports(const struct device_node *node,
int *nr_inport, int *nr_outport)
{
struct device_node *ep = NULL;
@@ -101,14 +101,40 @@ static int of_coresight_alloc_memory(struct device *dev,
return 0;
}
-struct coresight_platform_data *of_get_coresight_platform_data(
- struct device *dev, struct device_node *node)
+int of_coresight_get_cpu(const struct device_node *node)
{
- int i = 0, ret = 0, cpu;
+ int cpu;
+ bool found;
+ struct device_node *dn, *np;
+
+ dn = of_parse_phandle(node, "cpu", 0);
+
+ /* Affinity defaults to CPU0 */
+ if (!dn)
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ np = of_cpu_device_node_get(cpu);
+ found = (dn == np);
+ of_node_put(np);
+ if (found)
+ break;
+ }
+ of_node_put(dn);
+
+ /* Affinity to CPU0 if no cpu nodes are found */
+ return found ? cpu : 0;
+}
+EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
+
+struct coresight_platform_data *
+of_get_coresight_platform_data(struct device *dev,
+ const struct device_node *node)
+{
+ int i = 0, ret = 0;
struct coresight_platform_data *pdata;
struct of_endpoint endpoint, rendpoint;
struct device *rdev;
- struct device_node *dn;
struct device_node *ep = NULL;
struct device_node *rparent = NULL;
struct device_node *rport = NULL;
@@ -175,16 +201,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
} while (ep);
}
- /* Affinity defaults to CPU0 */
- pdata->cpu = 0;
- dn = of_parse_phandle(node, "cpu", 0);
- for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) {
- if (dn == of_get_cpu_node(cpu, NULL)) {
- pdata->cpu = cpu;
- break;
- }
- }
- of_node_put(dn);
+ pdata->cpu = of_coresight_get_cpu(node);
return pdata;
}
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 1e160fc37ecc..2c64d0e0740f 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -30,6 +30,19 @@ config I2C_MUX_GPIO
This driver can also be built as a module. If so, the module
will be called i2c-mux-gpio.
+config I2C_MUX_GPMUX
+ tristate "General Purpose I2C multiplexer"
+ select MULTIPLEXER
+ depends on OF || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for a
+ general purpose I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which in turn is controlled
+ by a MUX-controller from the MUX subsystem.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-gpmux.
+
config I2C_MUX_LTC4306
tristate "LTC LTC4306/5 I2C multiplexer"
select GPIOLIB
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index ff7618cd5312..4a67d3199877 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_I2C_ARB_GPIO_CHALLENGE) += i2c-arb-gpio-challenge.o
obj-$(CONFIG_I2C_DEMUX_PINCTRL) += i2c-demux-pinctrl.o
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_GPMUX) += i2c-mux-gpmux.o
obj-$(CONFIG_I2C_MUX_LTC4306) += i2c-mux-ltc4306.o
obj-$(CONFIG_I2C_MUX_MLXCPLD) += i2c-mux-mlxcpld.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
new file mode 100644
index 000000000000..92cf5f48afe6
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -0,0 +1,173 @@
+/*
+ * General Purpose I2C multiplexer
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct mux {
+ struct mux_control *control;
+
+ bool do_not_deselect;
+};
+
+static int i2c_mux_select(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct mux *mux = i2c_mux_priv(muxc);
+ int ret;
+
+ ret = mux_control_select(mux->control, chan);
+ mux->do_not_deselect = ret < 0;
+
+ return ret;
+}
+
+static int i2c_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct mux *mux = i2c_mux_priv(muxc);
+
+ if (mux->do_not_deselect)
+ return 0;
+
+ return mux_control_deselect(mux->control);
+}
+
+static struct i2c_adapter *mux_parent_adapter(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *parent_np;
+ struct i2c_adapter *parent;
+
+ parent_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!parent_np) {
+ dev_err(dev, "Cannot parse i2c-parent\n");
+ return ERR_PTR(-ENODEV);
+ }
+ parent = of_find_i2c_adapter_by_node(parent_np);
+ of_node_put(parent_np);
+ if (!parent)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return parent;
+}
+
+static const struct of_device_id i2c_mux_of_match[] = {
+ { .compatible = "i2c-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_of_match);
+
+static int i2c_mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ struct i2c_mux_core *muxc;
+ struct mux *mux;
+ struct i2c_adapter *parent;
+ int children;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ mux->control = devm_mux_control_get(dev, NULL);
+ if (IS_ERR(mux->control)) {
+ if (PTR_ERR(mux->control) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get control-mux\n");
+ return PTR_ERR(mux->control);
+ }
+
+ parent = mux_parent_adapter(dev);
+ if (IS_ERR(parent)) {
+ if (PTR_ERR(parent) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get i2c-parent adapter\n");
+ return PTR_ERR(parent);
+ }
+
+ children = of_get_child_count(np);
+
+ muxc = i2c_mux_alloc(parent, dev, children, 0, 0,
+ i2c_mux_select, i2c_mux_deselect);
+ if (!muxc) {
+ ret = -ENOMEM;
+ goto err_parent;
+ }
+ muxc->priv = mux;
+
+ platform_set_drvdata(pdev, muxc);
+
+ muxc->mux_locked = of_property_read_bool(np, "mux-locked");
+
+ for_each_child_of_node(np, child) {
+ u32 chan;
+
+ ret = of_property_read_u32(child, "reg", &chan);
+ if (ret < 0) {
+ dev_err(dev, "no reg property for node '%s'\n",
+ child->name);
+ goto err_children;
+ }
+
+ if (chan >= mux_control_states(mux->control)) {
+ dev_err(dev, "invalid reg %u\n", chan);
+ ret = -EINVAL;
+ goto err_children;
+ }
+
+ ret = i2c_mux_add_adapter(muxc, 0, chan, 0);
+ if (ret)
+ goto err_children;
+ }
+
+ dev_info(dev, "%d-port mux on %s adapter\n", children, parent->name);
+
+ return 0;
+
+err_children:
+ i2c_mux_del_adapters(muxc);
+err_parent:
+ i2c_put_adapter(parent);
+
+ return ret;
+}
+
+static int i2c_mux_remove(struct platform_device *pdev)
+{
+ struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
+
+ i2c_mux_del_adapters(muxc);
+ i2c_put_adapter(muxc->parent);
+
+ return 0;
+}
+
+static struct platform_driver i2c_mux_driver = {
+ .probe = i2c_mux_probe,
+ .remove = i2c_mux_remove,
+ .driver = {
+ .name = "i2c-mux-gpmux",
+ .of_match_table = i2c_mux_of_match,
+ },
+};
+module_platform_driver(i2c_mux_driver);
+
+MODULE_DESCRIPTION("General Purpose I2C multiplexer driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index a918270d6f54..b3c8c6ef0dff 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -83,6 +83,7 @@ source "drivers/iio/humidity/Kconfig"
source "drivers/iio/imu/Kconfig"
source "drivers/iio/light/Kconfig"
source "drivers/iio/magnetometer/Kconfig"
+source "drivers/iio/multiplexer/Kconfig"
source "drivers/iio/orientation/Kconfig"
if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 33fa4026f92c..93c769cd99bf 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -28,6 +28,7 @@ obj-y += humidity/
obj-y += imu/
obj-y += light/
obj-y += magnetometer/
+obj-y += multiplexer/
obj-y += orientation/
obj-y += potentiometer/
obj-y += potentiostat/
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index a3941bade6a7..da3d06b073bb 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -867,3 +867,63 @@ err_unlock:
return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
+
+unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
+{
+ const struct iio_chan_spec_ext_info *ext_info;
+ unsigned int i = 0;
+
+ if (!chan->channel->ext_info)
+ return i;
+
+ for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
+ ++i;
+
+ return i;
+}
+EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
+
+static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
+ const struct iio_channel *chan,
+ const char *attr)
+{
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ if (!chan->channel->ext_info)
+ return NULL;
+
+ for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
+ if (!strcmp(attr, ext_info->name))
+ return ext_info;
+ }
+
+ return NULL;
+}
+
+ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
+ const char *attr, char *buf)
+{
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ ext_info = iio_lookup_ext_info(chan, attr);
+ if (!ext_info)
+ return -EINVAL;
+
+ return ext_info->read(chan->indio_dev, ext_info->private,
+ chan->channel, buf);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
+
+ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
+ const char *buf, size_t len)
+{
+ const struct iio_chan_spec_ext_info *ext_info;
+
+ ext_info = iio_lookup_ext_info(chan, attr);
+ if (!ext_info)
+ return -EINVAL;
+
+ return ext_info->write(chan->indio_dev, ext_info->private,
+ chan->channel, buf, len);
+}
+EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
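A minimal consumer sketch for the new in-kernel accessors. The attribute name "scale_available" and the surrounding device context are assumptions, not part of the patch:

	static int dump_ext_info(struct device *dev, struct iio_channel *chan)
	{
		char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		ssize_t len;

		if (!buf)
			return -ENOMEM;

		len = iio_read_channel_ext_info(chan, "scale_available", buf);
		if (len >= 0)
			dev_info(dev, "scale_available: %s", buf);

		kfree(buf);
		return len < 0 ? len : 0;
	}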
diff --git a/drivers/iio/multiplexer/Kconfig b/drivers/iio/multiplexer/Kconfig
new file mode 100644
index 000000000000..735a7b0e6fd8
--- /dev/null
+++ b/drivers/iio/multiplexer/Kconfig
@@ -0,0 +1,18 @@
+#
+# Multiplexer drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Multiplexers"
+
+config IIO_MUX
+ tristate "IIO multiplexer driver"
+ select MULTIPLEXER
+ depends on OF || COMPILE_TEST
+ help
+ Say yes here to build support for the IIO multiplexer.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iio-mux.
+
+endmenu
diff --git a/drivers/iio/multiplexer/Makefile b/drivers/iio/multiplexer/Makefile
new file mode 100644
index 000000000000..68be3c4abd07
--- /dev/null
+++ b/drivers/iio/multiplexer/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O multiplexer drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_IIO_MUX) += iio-mux.o
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
new file mode 100644
index 000000000000..37ba007f8dca
--- /dev/null
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -0,0 +1,459 @@
+/*
+ * IIO multiplexer driver
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+struct mux_ext_info_cache {
+ char *data;
+ ssize_t size;
+};
+
+struct mux_child {
+ struct mux_ext_info_cache *ext_info_cache;
+};
+
+struct mux {
+ int cached_state;
+ struct mux_control *control;
+ struct iio_channel *parent;
+ struct iio_dev *indio_dev;
+ struct iio_chan_spec *chan;
+ struct iio_chan_spec_ext_info *ext_info;
+ struct mux_child *child;
+};
+
+static int iio_mux_select(struct mux *mux, int idx)
+{
+ struct mux_child *child = &mux->child[idx];
+ struct iio_chan_spec const *chan = &mux->chan[idx];
+ int ret;
+ int i;
+
+ ret = mux_control_select(mux->control, chan->channel);
+ if (ret < 0) {
+ mux->cached_state = -1;
+ return ret;
+ }
+
+ if (mux->cached_state == chan->channel)
+ return 0;
+
+ if (chan->ext_info) {
+ for (i = 0; chan->ext_info[i].name; ++i) {
+ const char *attr = chan->ext_info[i].name;
+ struct mux_ext_info_cache *cache;
+
+ cache = &child->ext_info_cache[i];
+
+ if (cache->size < 0)
+ continue;
+
+ ret = iio_write_channel_ext_info(mux->parent, attr,
+ cache->data,
+ cache->size);
+
+ if (ret < 0) {
+ mux_control_deselect(mux->control);
+ mux->cached_state = -1;
+ return ret;
+ }
+ }
+ }
+ mux->cached_state = chan->channel;
+
+ return 0;
+}
+
+static void iio_mux_deselect(struct mux *mux)
+{
+ mux_control_deselect(mux->control);
+}
+
+static int mux_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mux *mux = iio_priv(indio_dev);
+ int idx = chan - mux->chan;
+ int ret;
+
+ ret = iio_mux_select(mux, idx);
+ if (ret < 0)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_read_channel_raw(mux->parent, val);
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_read_channel_scale(mux->parent, val, val2);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ iio_mux_deselect(mux);
+
+ return ret;
+}
+
+static int mux_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct mux *mux = iio_priv(indio_dev);
+ int idx = chan - mux->chan;
+ int ret;
+
+ ret = iio_mux_select(mux, idx);
+ if (ret < 0)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *type = IIO_VAL_INT;
+ ret = iio_read_avail_channel_raw(mux->parent, vals, length);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ iio_mux_deselect(mux);
+
+ return ret;
+}
+
+static int mux_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct mux *mux = iio_priv(indio_dev);
+ int idx = chan - mux->chan;
+ int ret;
+
+ ret = iio_mux_select(mux, idx);
+ if (ret < 0)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_write_channel_raw(mux->parent, val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ iio_mux_deselect(mux);
+
+ return ret;
+}
+
+static const struct iio_info mux_info = {
+ .read_raw = mux_read_raw,
+ .read_avail = mux_read_avail,
+ .write_raw = mux_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static ssize_t mux_read_ext_info(struct iio_dev *indio_dev, uintptr_t private,
+ struct iio_chan_spec const *chan, char *buf)
+{
+ struct mux *mux = iio_priv(indio_dev);
+ int idx = chan - mux->chan;
+ ssize_t ret;
+
+ ret = iio_mux_select(mux, idx);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_read_channel_ext_info(mux->parent,
+ mux->ext_info[private].name,
+ buf);
+
+ iio_mux_deselect(mux);
+
+ return ret;
+}
+
+static ssize_t mux_write_ext_info(struct iio_dev *indio_dev, uintptr_t private,
+ struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ struct device *dev = indio_dev->dev.parent;
+ struct mux *mux = iio_priv(indio_dev);
+ int idx = chan - mux->chan;
+ char *new;
+ ssize_t ret;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+
+ ret = iio_mux_select(mux, idx);
+ if (ret < 0)
+ return ret;
+
+ new = devm_kmemdup(dev, buf, len + 1, GFP_KERNEL);
+ if (!new) {
+ iio_mux_deselect(mux);
+ return -ENOMEM;
+ }
+
+ new[len] = 0;
+
+ ret = iio_write_channel_ext_info(mux->parent,
+ mux->ext_info[private].name,
+ buf, len);
+ if (ret < 0) {
+ iio_mux_deselect(mux);
+ devm_kfree(dev, new);
+ return ret;
+ }
+
+ devm_kfree(dev, mux->child[idx].ext_info_cache[private].data);
+ mux->child[idx].ext_info_cache[private].data = new;
+ mux->child[idx].ext_info_cache[private].size = len;
+
+ iio_mux_deselect(mux);
+
+ return ret;
+}
+
+static int mux_configure_channel(struct device *dev, struct mux *mux,
+ u32 state, const char *label, int idx)
+{
+ struct mux_child *child = &mux->child[idx];
+ struct iio_chan_spec *chan = &mux->chan[idx];
+ struct iio_chan_spec const *pchan = mux->parent->channel;
+ char *page = NULL;
+ int num_ext_info;
+ int i;
+ int ret;
+
+ chan->indexed = 1;
+ chan->output = pchan->output;
+ chan->datasheet_name = label;
+ chan->ext_info = mux->ext_info;
+
+ ret = iio_get_channel_type(mux->parent, &chan->type);
+ if (ret < 0) {
+ dev_err(dev, "failed to get parent channel type\n");
+ return ret;
+ }
+
+ if (iio_channel_has_info(pchan, IIO_CHAN_INFO_RAW))
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
+ if (iio_channel_has_info(pchan, IIO_CHAN_INFO_SCALE))
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
+
+ if (iio_channel_has_available(pchan, IIO_CHAN_INFO_RAW))
+ chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_RAW);
+
+ if (state >= mux_control_states(mux->control)) {
+ dev_err(dev, "too many channels\n");
+ return -EINVAL;
+ }
+
+ chan->channel = state;
+
+ num_ext_info = iio_get_channel_ext_info_count(mux->parent);
+ if (num_ext_info) {
+ page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ }
+ child->ext_info_cache = devm_kzalloc(dev,
+ sizeof(*child->ext_info_cache) *
+ num_ext_info, GFP_KERNEL);
+ for (i = 0; i < num_ext_info; ++i) {
+ child->ext_info_cache[i].size = -1;
+
+ if (!pchan->ext_info[i].write)
+ continue;
+ if (!pchan->ext_info[i].read)
+ continue;
+
+ ret = iio_read_channel_ext_info(mux->parent,
+ mux->ext_info[i].name,
+ page);
+ if (ret < 0) {
+ dev_err(dev, "failed to get ext_info '%s'\n",
+ pchan->ext_info[i].name);
+ return ret;
+ }
+ if (ret >= PAGE_SIZE) {
+ dev_err(dev, "too large ext_info '%s'\n",
+ pchan->ext_info[i].name);
+ return -EINVAL;
+ }
+
+ child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1,
+ GFP_KERNEL);
+ child->ext_info_cache[i].data[ret] = 0;
+ child->ext_info_cache[i].size = ret;
+ }
+
+ if (page)
+ devm_kfree(dev, page);
+
+ return 0;
+}
+
+/*
+ * Same as of_property_for_each_string(), but also keeps track of the
+ * index of each string.
+ */
+#define of_property_for_each_string_index(np, propname, prop, s, i) \
+ for (prop = of_find_property(np, propname, NULL), \
+ s = of_prop_next_string(prop, NULL), \
+ i = 0; \
+ s; \
+ s = of_prop_next_string(prop, s), \
+ i++)
+
+static int mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iio_dev *indio_dev;
+ struct iio_channel *parent;
+ struct mux *mux;
+ struct property *prop;
+ const char *label;
+ u32 state;
+ int sizeof_ext_info;
+ int children;
+ int sizeof_priv;
+ int i;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ parent = devm_iio_channel_get(dev, "parent");
+ if (IS_ERR(parent)) {
+ if (PTR_ERR(parent) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get parent channel\n");
+ return PTR_ERR(parent);
+ }
+
+ sizeof_ext_info = iio_get_channel_ext_info_count(parent);
+ if (sizeof_ext_info) {
+ sizeof_ext_info += 1; /* one extra entry for the sentinel */
+ sizeof_ext_info *= sizeof(*mux->ext_info);
+ }
+
+ children = 0;
+ of_property_for_each_string(np, "channels", prop, label) {
+ if (*label)
+ children++;
+ }
+ if (children <= 0) {
+ dev_err(dev, "not even a single child\n");
+ return -EINVAL;
+ }
+
+ sizeof_priv = sizeof(*mux);
+ sizeof_priv += sizeof(*mux->child) * children;
+ sizeof_priv += sizeof(*mux->chan) * children;
+ sizeof_priv += sizeof_ext_info;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof_priv);
+ if (!indio_dev)
+ return -ENOMEM;
+
+ mux = iio_priv(indio_dev);
+ mux->child = (struct mux_child *)(mux + 1);
+ mux->chan = (struct iio_chan_spec *)(mux->child + children);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ mux->parent = parent;
+ mux->cached_state = -1;
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &mux_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mux->chan;
+ indio_dev->num_channels = children;
+ if (sizeof_ext_info) {
+ mux->ext_info = devm_kmemdup(dev,
+ parent->channel->ext_info,
+ sizeof_ext_info, GFP_KERNEL);
+ if (!mux->ext_info)
+ return -ENOMEM;
+
+ for (i = 0; mux->ext_info[i].name; ++i) {
+ if (parent->channel->ext_info[i].read)
+ mux->ext_info[i].read = mux_read_ext_info;
+ if (parent->channel->ext_info[i].write)
+ mux->ext_info[i].write = mux_write_ext_info;
+ mux->ext_info[i].private = i;
+ }
+ }
+
+ mux->control = devm_mux_control_get(dev, NULL);
+ if (IS_ERR(mux->control)) {
+ if (PTR_ERR(mux->control) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get control-mux\n");
+ return PTR_ERR(mux->control);
+ }
+
+ i = 0;
+ of_property_for_each_string_index(np, "channels", prop, label, state) {
+ if (!*label)
+ continue;
+
+ ret = mux_configure_channel(dev, mux, state, label, i++);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret) {
+ dev_err(dev, "failed to register iio device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mux_match[] = {
+ { .compatible = "io-channel-mux" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mux_match);
+
+static struct platform_driver mux_driver = {
+ .probe = mux_probe,
+ .driver = {
+ .name = "iio-mux",
+ .of_match_table = mux_match,
+ },
+};
+module_platform_driver(mux_driver);
+
+MODULE_DESCRIPTION("IIO multiplexer driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index 12102448fddd..a1e07a77d4e6 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -212,7 +212,7 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
int bus_nr;
struct ipack_bus_device *bus;
- bus = kzalloc(sizeof(struct ipack_bus_device), GFP_KERNEL);
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
if (!bus)
return NULL;
@@ -402,7 +402,6 @@ static int ipack_device_read_id(struct ipack_device *dev)
* ID ROM contents */
dev->id = kmalloc(dev->id_avail, GFP_KERNEL);
if (!dev->id) {
- dev_err(&dev->dev, "dev->id alloc failed.\n");
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 22c1aeeb6421..2744b1b91b57 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -357,7 +357,10 @@ static int aemif_probe(struct platform_device *pdev)
return PTR_ERR(aemif->clk);
}
- clk_prepare_enable(aemif->clk);
+ ret = clk_prepare_enable(aemif->clk);
+ if (ret)
+ return ret;
+
aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC;
if (of_device_is_compatible(np, "ti,da850-aemif"))
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 07bbd4cc1852..8136dc7e863d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -490,6 +490,14 @@ config ASPEED_LPC_CTRL
ioctl()s, the driver also provides a read/write interface to a BMC ram
region where the host LPC read/write region can be buffered.
+config ASPEED_LPC_SNOOP
+ tristate "Aspeed ast2500 HOST LPC snoop support"
+ depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
+ help
+ Provides a driver to control the LPC snoop interface which
+ allows the BMC to listen on and save the data written by
+ the host to an arbitrary LPC I/O port.
+
config PCI_ENDPOINT_TEST
depends on PCI
select CRC32
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 81ef3e67acc9..b0b766416306 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index dfb72ecfa604..84e5b949399e 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -32,7 +32,7 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/slab.h>
-#include <linux/i2c/apds990x.h>
+#include <linux/platform_data/apds990x.h>
/* Register map */
#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */
@@ -841,7 +841,7 @@ static ssize_t apds990x_prox_enable_store(struct device *dev,
static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
apds990x_prox_enable_store);
-static const char reporting_modes[][9] = {"trigger", "periodic"};
+static const char *reporting_modes[] = {"trigger", "periodic"};
static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -856,13 +856,13 @@ static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
+ int ret;
- if (sysfs_streq(buf, reporting_modes[0]))
- chip->prox_continuous_mode = 0;
- else if (sysfs_streq(buf, reporting_modes[1]))
- chip->prox_continuous_mode = 1;
- else
- return -EINVAL;
+ ret = sysfs_match_string(reporting_modes, buf);
+ if (ret < 0)
+ return ret;
+
+ chip->prox_continuous_mode = ret;
return len;
}
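The helper works here because sysfs_match_string() returns the array index of the matching entry (or a negative errno), so the result maps directly onto prox_continuous_mode. The same pattern in isolation, using the driver's own mode strings:

	static const char * const modes[] = { "trigger", "periodic" };
	int idx;

	idx = sysfs_match_string(modes, buf);	/* index, or -EINVAL */
	if (idx < 0)
		return idx;
	/* idx is 0 for "trigger", 1 for "periodic" */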
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c
new file mode 100644
index 000000000000..593905565b74
--- /dev/null
+++ b/drivers/misc/aspeed-lpc-snoop.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2017 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED LPC snoop interface which
+ * allows the BMC to listen on and save the data written by
+ * the host to an arbitrary LPC I/O port.
+ *
+ * Typically used by the BMC to "watch" host boot progress via port
+ * 0x80 writes made by the BIOS during the boot process.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define DEVICE_NAME "aspeed-lpc-snoop"
+
+#define NUM_SNOOP_CHANNELS 2
+#define SNOOP_FIFO_SIZE 2048
+
+#define HICR5 0x0
+#define HICR5_EN_SNP0W BIT(0)
+#define HICR5_ENINT_SNP0W BIT(1)
+#define HICR5_EN_SNP1W BIT(2)
+#define HICR5_ENINT_SNP1W BIT(3)
+
+#define HICR6 0x4
+#define HICR6_STR_SNP0W BIT(0)
+#define HICR6_STR_SNP1W BIT(1)
+#define SNPWADR 0x10
+#define SNPWADR_CH0_MASK GENMASK(15, 0)
+#define SNPWADR_CH0_SHIFT 0
+#define SNPWADR_CH1_MASK GENMASK(31, 16)
+#define SNPWADR_CH1_SHIFT 16
+#define SNPWDR 0x14
+#define SNPWDR_CH0_MASK GENMASK(7, 0)
+#define SNPWDR_CH0_SHIFT 0
+#define SNPWDR_CH1_MASK GENMASK(15, 8)
+#define SNPWDR_CH1_SHIFT 8
+#define HICRB 0x80
+#define HICRB_ENSNP0D BIT(14)
+#define HICRB_ENSNP1D BIT(15)
+
+struct aspeed_lpc_snoop {
+ struct regmap *regmap;
+ int irq;
+ struct kfifo snoop_fifo[NUM_SNOOP_CHANNELS];
+};
+
+/* Save a byte to a FIFO and discard the oldest byte if FIFO is full */
+static void put_fifo_with_discard(struct kfifo *fifo, u8 val)
+{
+ if (!kfifo_initialized(fifo))
+ return;
+ if (kfifo_is_full(fifo))
+ kfifo_skip(fifo);
+ kfifo_put(fifo, val);
+}
+
+static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
+{
+ struct aspeed_lpc_snoop *lpc_snoop = arg;
+ u32 reg, data;
+
+ if (regmap_read(lpc_snoop->regmap, HICR6, &reg))
+ return IRQ_NONE;
+
+ /* Check if one of the snoop channels is interrupting */
+ reg &= (HICR6_STR_SNP0W | HICR6_STR_SNP1W);
+ if (!reg)
+ return IRQ_NONE;
+
+ /* Ack pending IRQs */
+ regmap_write(lpc_snoop->regmap, HICR6, reg);
+
+ /* Read and save the most recent snooped data byte to the FIFO */

+ regmap_read(lpc_snoop->regmap, SNPWDR, &data);
+
+ if (reg & HICR6_STR_SNP0W) {
+ u8 val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT;
+
+ put_fifo_with_discard(&lpc_snoop->snoop_fifo[0], val);
+ }
+ if (reg & HICR6_STR_SNP1W) {
+ u8 val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT;
+
+ put_fifo_with_discard(&lpc_snoop->snoop_fifo[1], val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ lpc_snoop->irq = platform_get_irq(pdev, 0);
+ if (!lpc_snoop->irq)
+ return -ENODEV;
+
+ rc = devm_request_irq(dev, lpc_snoop->irq,
+ aspeed_lpc_snoop_irq, IRQF_SHARED,
+ DEVICE_NAME, lpc_snoop);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", lpc_snoop->irq);
+ lpc_snoop->irq = 0;
+ return rc;
+ }
+
+ return 0;
+}
+
+static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ int channel, u16 lpc_port)
+{
+ int rc = 0;
+ u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
+
+ /* Create FIFO datastructure */
+ rc = kfifo_alloc(&lpc_snoop->snoop_fifo[channel],
+ SNOOP_FIFO_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ /* Enable LPC snoop channel at requested port */
+ switch (channel) {
+ case 0:
+ hicr5_en = HICR5_EN_SNP0W | HICR5_ENINT_SNP0W;
+ snpwadr_mask = SNPWADR_CH0_MASK;
+ snpwadr_shift = SNPWADR_CH0_SHIFT;
+ hicrb_en = HICRB_ENSNP0D;
+ break;
+ case 1:
+ hicr5_en = HICR5_EN_SNP1W | HICR5_ENINT_SNP1W;
+ snpwadr_mask = SNPWADR_CH1_MASK;
+ snpwadr_shift = SNPWADR_CH1_SHIFT;
+ hicrb_en = HICRB_ENSNP1D;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
+ regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask,
+ lpc_port << snpwadr_shift);
+ regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en);
+
+ return rc;
+}
+
+static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ int channel)
+{
+ switch (channel) {
+ case 0:
+ regmap_update_bits(lpc_snoop->regmap, HICR5,
+ HICR5_EN_SNP0W | HICR5_ENINT_SNP0W,
+ 0);
+ break;
+ case 1:
+ regmap_update_bits(lpc_snoop->regmap, HICR5,
+ HICR5_EN_SNP1W | HICR5_ENINT_SNP1W,
+ 0);
+ break;
+ default:
+ return;
+ }
+
+ kfifo_free(&lpc_snoop->snoop_fifo[channel]);
+}
+
+static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+{
+ struct aspeed_lpc_snoop *lpc_snoop;
+ struct device *dev;
+ u32 port;
+ int rc;
+
+ dev = &pdev->dev;
+
+ lpc_snoop = devm_kzalloc(dev, sizeof(*lpc_snoop), GFP_KERNEL);
+ if (!lpc_snoop)
+ return -ENOMEM;
+
+ lpc_snoop->regmap = syscon_node_to_regmap(
+ pdev->dev.parent->of_node);
+ if (IS_ERR(lpc_snoop->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ dev_set_drvdata(&pdev->dev, lpc_snoop);
+
+ rc = of_property_read_u32_index(dev->of_node, "snoop-ports", 0, &port);
+ if (rc) {
+ dev_err(dev, "no snoop ports configured\n");
+ return -ENODEV;
+ }
+
+ rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
+ if (rc)
+ return rc;
+
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, 0, port);
+ if (rc)
+ return rc;
+
+ /* Configuration of 2nd snoop channel port is optional */
+ if (of_property_read_u32_index(dev->of_node, "snoop-ports",
+ 1, &port) == 0) {
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, 1, port);
+ if (rc)
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ }
+
+ return rc;
+}
+
+static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
+{
+ struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev);
+
+ /* Disable both snoop channels */
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ aspeed_lpc_disable_snoop(lpc_snoop, 1);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_lpc_snoop_match[] = {
+ { .compatible = "aspeed,ast2500-lpc-snoop" },
+ { },
+};
+
+static struct platform_driver aspeed_lpc_snoop_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_lpc_snoop_match,
+ },
+ .probe = aspeed_lpc_snoop_probe,
+ .remove = aspeed_lpc_snoop_remove,
+};
+
+module_platform_driver(aspeed_lpc_snoop_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_lpc_snoop_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Lippert <rlippert@google.com>");
+MODULE_DESCRIPTION("Linux driver to control Aspeed LPC snoop functionality");
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 845466e45b95..38fcfe219d1c 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
-#include <linux/i2c/bh1770glc.h>
+#include <linux/platform_data/bh1770glc.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 07aad8576334..40c79089e548 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -1040,7 +1040,7 @@ static void mei_cl_bus_dev_init(struct mei_device *bus,
*
* @bus: mei device
*/
-void mei_cl_bus_rescan(struct mei_device *bus)
+static void mei_cl_bus_rescan(struct mei_device *bus)
{
struct mei_cl_device *cldev, *n;
struct mei_me_client *me_cl;
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index e1e4d47d4d7d..5c8286b40b62 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -65,7 +65,7 @@
#define HBM_MAJOR_VERSION_DOT 2
/*
- * MEI version with notifcation support
+ * MEI version with notification support
*/
#define HBM_MINOR_VERSION_EV 0
#define HBM_MAJOR_VERSION_EV 2
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index c8ad9ee7cb80..d2f691424dd1 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -215,12 +215,6 @@ int mei_start(struct mei_device *dev)
}
} while (ret);
- /* we cannot start the device w/o hbm start message completed */
- if (dev->dev_state == MEI_DEV_DISABLED) {
- dev_err(dev->dev, "reset failed");
- goto err;
- }
-
if (mei_hbm_start_wait(dev)) {
dev_err(dev->dev, "HBM haven't started");
goto err;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index c14e35201721..b0b8f18a85e3 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -235,6 +235,17 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}
+static inline int hdr_is_valid(u32 msg_hdr)
+{
+ struct mei_msg_hdr *mei_hdr;
+
+ mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
+ if (!msg_hdr || mei_hdr->reserved)
+ return -EBADMSG;
+
+ return 0;
+}
+
/**
* mei_irq_read_handler - bottom half read routine after ISR to
* handle the read processing.
@@ -256,17 +267,18 @@ int mei_irq_read_handler(struct mei_device *dev,
dev->rd_msg_hdr = mei_read_hdr(dev);
(*slots)--;
dev_dbg(dev->dev, "slots =%08x.\n", *slots);
- }
- mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
- if (mei_hdr->reserved || !dev->rd_msg_hdr) {
- dev_err(dev->dev, "corrupted message header 0x%08X\n",
+ ret = hdr_is_valid(dev->rd_msg_hdr);
+ if (ret) {
+ dev_err(dev->dev, "corrupted message header 0x%08X\n",
dev->rd_msg_hdr);
- ret = -EBADMSG;
- goto end;
+ goto end;
+ }
}
+ mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr;
+ dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
+
if (mei_slots2data(*slots) < mei_hdr->length) {
dev_err(dev->dev, "less data available than length=%08x.\n",
*slots);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 63a67c99fc78..ebcd5132e447 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -306,7 +306,6 @@ struct mei_hw_ops {
};
/* MEI bus API*/
-void mei_cl_bus_rescan(struct mei_device *bus);
void mei_cl_bus_rescan_work(struct work_struct *work);
void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index 3d528a13b8fc..426ad912b441 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/sram.h>
+#include <asm/fncpy.h>
#include <asm/set_memory.h>
#include "sram.h"
@@ -58,20 +59,32 @@ int sram_add_protect_exec(struct sram_partition *part)
* @src: Source address for the data to copy
* @size: Size of copy to perform, which starting from dst, must reside in pool
*
+ * Return: Address for copied data that can safely be called through function
+ * pointer, or NULL on failure.
+ *
* This helper function allows sram driver to act as central control location
* of 'protect-exec' pools which are normal sram pools but are always set
* read-only and executable except when copying data to them, at which point
* they are set to read-write non-executable, to make sure no memory is
* writeable and executable at the same time. This region must be page-aligned
* and is checked during probe, otherwise page attribute manipulation would
- * not be possible.
+ * not be possible. Care must be taken to only call the returned address as
+ * dst address is not guaranteed to be safely callable.
+ *
+ * NOTE: This function uses the fncpy macro to move code to the executable
+ * region. Some architectures have strict requirements for relocating
+ * executable code, so fncpy is a macro that must be defined by any arch
+ * making use of this functionality that guarantees a safe copy of exec
+ * data and returns a safe address that can be called as a C function
+ * pointer.
*/
-int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
- size_t size)
+void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+ size_t size)
{
struct sram_partition *part = NULL, *p;
unsigned long base;
int pages;
+ void *dst_cpy;
mutex_lock(&exec_pool_list_mutex);
list_for_each_entry(p, &exec_pool_list, list) {
@@ -81,10 +94,10 @@ int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
mutex_unlock(&exec_pool_list_mutex);
if (!part)
- return -EINVAL;
+ return NULL;
if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
- return -EINVAL;
+ return NULL;
base = (unsigned long)part->base;
pages = PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,13 +107,13 @@ int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
set_memory_nx((unsigned long)base, pages);
set_memory_rw((unsigned long)base, pages);
- memcpy(dst, src, size);
+ dst_cpy = fncpy(dst, src, size);
set_memory_ro((unsigned long)base, pages);
set_memory_x((unsigned long)base, pages);
mutex_unlock(&part->lock);
- return 0;
+ return dst_cpy;
}
EXPORT_SYMBOL_GPL(sram_exec_copy);
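Because fncpy() may adjust the returned address (e.g. setting the Thumb bit on ARM), callers must invoke the copy only through the pointer that sram_exec_copy() returns, exactly as the updated kernel-doc warns. A hedged caller sketch, with `pool`, `dst`, `size` and the position-independent function `my_sram_fn` assumed:

	void (*run)(void);

	run = sram_exec_copy(pool, dst, my_sram_fn, size);
	if (!run)
		return -EINVAL;

	run();	/* call only the fncpy()-returned address, never dst */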
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
new file mode 100644
index 000000000000..7c754a0f14bb
--- /dev/null
+++ b/drivers/mux/Kconfig
@@ -0,0 +1,59 @@
+#
+# Multiplexer devices
+#
+
+menuconfig MULTIPLEXER
+ tristate "Multiplexer subsystem"
+ help
+ Multiplexer controller subsystem. Multiplexers are used in a
+ variety of settings, and this subsystem abstracts their use
+ so that the rest of the kernel sees a common interface. When
+ multiple parallel multiplexers are controlled by one single
+ multiplexer controller, this subsystem also coordinates the
+ multiplexer accesses.
+
+ To compile the subsystem as a module, choose M here: the module will
+ be called mux-core.
+
+if MULTIPLEXER
+
+config MUX_ADG792A
+ tristate "Analog Devices ADG792A/ADG792G Multiplexers"
+ depends on I2C
+ help
+ ADG792A and ADG792G Wide Bandwidth Triple 4:1 Multiplexers
+
+ The driver supports both operating the three multiplexers in
+ parallel and operating them independently.
+
+ To compile the driver as a module, choose M here: the module will
+ be called mux-adg792a.
+
+config MUX_GPIO
+ tristate "GPIO-controlled Multiplexer"
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ GPIO-controlled Multiplexer controller.
+
+ The driver builds a single multiplexer controller using a number
+ of gpio pins. For N pins, there will be 2^N possible multiplexer
+ states. The GPIO pins can be connected (by the hardware) to several
+ multiplexers, which in that case will be operated in parallel.
+
+ To compile the driver as a module, choose M here: the module will
+ be called mux-gpio.
+
+config MUX_MMIO
+ tristate "MMIO register bitfield-controlled Multiplexer"
+ depends on (OF && MFD_SYSCON) || COMPILE_TEST
+ help
+ MMIO register bitfield-controlled Multiplexer controller.
+
+ The driver builds multiplexer controllers for bitfields in a syscon
+ register. For N bit wide bitfields, there will be 2^N possible
+ multiplexer states.
+
+ To compile the driver as a module, choose M here: the module will
+ be called mux-mmio.
+
+endif
diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile
new file mode 100644
index 000000000000..6bac5b0fea13
--- /dev/null
+++ b/drivers/mux/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for multiplexer devices.
+#
+
+obj-$(CONFIG_MULTIPLEXER) += mux-core.o
+obj-$(CONFIG_MUX_ADG792A) += mux-adg792a.o
+obj-$(CONFIG_MUX_GPIO) += mux-gpio.o
+obj-$(CONFIG_MUX_MMIO) += mux-mmio.o
diff --git a/drivers/mux/mux-adg792a.c b/drivers/mux/mux-adg792a.c
new file mode 100644
index 000000000000..12aa221ab90d
--- /dev/null
+++ b/drivers/mux/mux-adg792a.c
@@ -0,0 +1,157 @@
+/*
+ * Multiplexer driver for Analog Devices ADG792A/G Triple 4:1 mux
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/property.h>
+
+#define ADG792A_LDSW BIT(0)
+#define ADG792A_RESETB BIT(1)
+#define ADG792A_DISABLE(mux) (0x50 | (mux))
+#define ADG792A_DISABLE_ALL (0x5f)
+#define ADG792A_MUX(mux, state) (0xc0 | (((mux) + 1) << 2) | (state))
+#define ADG792A_MUX_ALL(state) (0xc0 | (state))
+
+static int adg792a_write_cmd(struct i2c_client *i2c, u8 cmd, int reset)
+{
+ u8 data = ADG792A_RESETB | ADG792A_LDSW;
+
+ /* ADG792A_RESETB is active low, the chip resets when it is zero. */
+ if (reset)
+ data &= ~ADG792A_RESETB;
+
+ return i2c_smbus_write_byte_data(i2c, cmd, data);
+}
+
+static int adg792a_set(struct mux_control *mux, int state)
+{
+ struct i2c_client *i2c = to_i2c_client(mux->chip->dev.parent);
+ u8 cmd;
+
+ if (mux->chip->controllers == 1) {
+ /* parallel mux controller operation */
+ if (state == MUX_IDLE_DISCONNECT)
+ cmd = ADG792A_DISABLE_ALL;
+ else
+ cmd = ADG792A_MUX_ALL(state);
+ } else {
+ unsigned int controller = mux_control_get_index(mux);
+
+ if (state == MUX_IDLE_DISCONNECT)
+ cmd = ADG792A_DISABLE(controller);
+ else
+ cmd = ADG792A_MUX(controller, state);
+ }
+
+ return adg792a_write_cmd(i2c, cmd, 0);
+}
+
+static const struct mux_control_ops adg792a_ops = {
+ .set = adg792a_set,
+};
+
+static int adg792a_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct mux_chip *mux_chip;
+ s32 idle_state[3];
+ u32 cells;
+ int ret;
+ int i;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ ret = device_property_read_u32(dev, "#mux-control-cells", &cells);
+ if (ret < 0)
+ return ret;
+ if (cells >= 2)
+ return -EINVAL;
+
+ mux_chip = devm_mux_chip_alloc(dev, cells ? 3 : 1, 0);
+ if (IS_ERR(mux_chip))
+ return PTR_ERR(mux_chip);
+
+ mux_chip->ops = &adg792a_ops;
+
+ ret = adg792a_write_cmd(i2c, ADG792A_DISABLE_ALL, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32_array(dev, "idle-state",
+ (u32 *)idle_state,
+ mux_chip->controllers);
+ if (ret < 0) {
+ idle_state[0] = MUX_IDLE_AS_IS;
+ idle_state[1] = MUX_IDLE_AS_IS;
+ idle_state[2] = MUX_IDLE_AS_IS;
+ }
+
+ for (i = 0; i < mux_chip->controllers; ++i) {
+ struct mux_control *mux = &mux_chip->mux[i];
+
+ mux->states = 4;
+
+ switch (idle_state[i]) {
+ case MUX_IDLE_DISCONNECT:
+ case MUX_IDLE_AS_IS:
+ case 0 ... 4:
+ mux->idle_state = idle_state[i];
+ break;
+ default:
+ dev_err(dev, "invalid idle-state %d\n", idle_state[i]);
+ return -EINVAL;
+ }
+ }
+
+ ret = devm_mux_chip_register(dev, mux_chip);
+ if (ret < 0)
+ return ret;
+
+ if (cells)
+ dev_info(dev, "3x single pole quadruple throw muxes registered\n");
+ else
+ dev_info(dev, "triple pole quadruple throw mux registered\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id adg792a_id[] = {
+ { .name = "adg792a", },
+ { .name = "adg792g", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adg792a_id);
+
+static const struct of_device_id adg792a_of_match[] = {
+ { .compatible = "adi,adg792a", },
+ { .compatible = "adi,adg792g", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adg792a_of_match);
+
+static struct i2c_driver adg792a_driver = {
+ .driver = {
+ .name = "adg792a",
+ .of_match_table = of_match_ptr(adg792a_of_match),
+ },
+ .probe = adg792a_probe,
+ .id_table = adg792a_id,
+};
+module_i2c_driver(adg792a_driver);
+
+MODULE_DESCRIPTION("Analog Devices ADG792A/G Triple 4:1 mux driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mux/mux-core.c b/drivers/mux/mux-core.c
new file mode 100644
index 000000000000..90b8995f07cb
--- /dev/null
+++ b/drivers/mux/mux-core.c
@@ -0,0 +1,547 @@
+/*
+ * Multiplexer subsystem
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "mux-core: " fmt
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/mux/driver.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+/*
+ * The idle-as-is "state" is not an actual state that may be selected, it
+ * only implies that the state should not be changed. So, use that state
+ * as indication that the cached state of the multiplexer is unknown.
+ */
+#define MUX_CACHE_UNKNOWN MUX_IDLE_AS_IS
+
+static struct class mux_class = {
+ .name = "mux",
+ .owner = THIS_MODULE,
+};
+
+static DEFINE_IDA(mux_ida);
+
+static int __init mux_init(void)
+{
+ ida_init(&mux_ida);
+ return class_register(&mux_class);
+}
+
+static void __exit mux_exit(void)
+{
+ class_unregister(&mux_class);
+ ida_destroy(&mux_ida);
+}
+
+static void mux_chip_release(struct device *dev)
+{
+ struct mux_chip *mux_chip = to_mux_chip(dev);
+
+ ida_simple_remove(&mux_ida, mux_chip->id);
+ kfree(mux_chip);
+}
+
+static struct device_type mux_type = {
+ .name = "mux-chip",
+ .release = mux_chip_release,
+};
+
+/**
+ * mux_chip_alloc() - Allocate a mux-chip.
+ * @dev: The parent device implementing the mux interface.
+ * @controllers: The number of mux controllers to allocate for this chip.
+ * @sizeof_priv: Size of extra memory area for private use by the caller.
+ *
+ * After allocating the mux-chip with the desired number of mux controllers
+ * but before registering the chip, the mux driver is required to configure
+ * the number of valid mux states in the mux_chip->mux[N].states members and
+ * the desired idle state in the returned mux_chip->mux[N].idle_state members.
+ * The default idle state is MUX_IDLE_AS_IS. The mux driver also needs to
+ * provide a pointer to the operations struct in the mux_chip->ops member
+ * before registering the mux-chip with mux_chip_register.
+ *
+ * Return: A pointer to the new mux-chip, or an ERR_PTR with a negative errno.
+ */
+struct mux_chip *mux_chip_alloc(struct device *dev,
+ unsigned int controllers, size_t sizeof_priv)
+{
+ struct mux_chip *mux_chip;
+ int i;
+
+ if (WARN_ON(!dev || !controllers))
+ return ERR_PTR(-EINVAL);
+
+ mux_chip = kzalloc(sizeof(*mux_chip) +
+ controllers * sizeof(*mux_chip->mux) +
+ sizeof_priv, GFP_KERNEL);
+ if (!mux_chip)
+ return ERR_PTR(-ENOMEM);
+
+ mux_chip->mux = (struct mux_control *)(mux_chip + 1);
+ mux_chip->dev.class = &mux_class;
+ mux_chip->dev.type = &mux_type;
+ mux_chip->dev.parent = dev;
+ mux_chip->dev.of_node = dev->of_node;
+ dev_set_drvdata(&mux_chip->dev, mux_chip);
+
+ mux_chip->id = ida_simple_get(&mux_ida, 0, 0, GFP_KERNEL);
+ if (mux_chip->id < 0) {
+ int err = mux_chip->id;
+
+ pr_err("muxchipX failed to get a device id\n");
+ kfree(mux_chip);
+ return ERR_PTR(err);
+ }
+ dev_set_name(&mux_chip->dev, "muxchip%d", mux_chip->id);
+
+ mux_chip->controllers = controllers;
+ for (i = 0; i < controllers; ++i) {
+ struct mux_control *mux = &mux_chip->mux[i];
+
+ mux->chip = mux_chip;
+ sema_init(&mux->lock, 1);
+ mux->cached_state = MUX_CACHE_UNKNOWN;
+ mux->idle_state = MUX_IDLE_AS_IS;
+ }
+
+ device_initialize(&mux_chip->dev);
+
+ return mux_chip;
+}
+EXPORT_SYMBOL_GPL(mux_chip_alloc);
+
+static int mux_control_set(struct mux_control *mux, int state)
+{
+ int ret = mux->chip->ops->set(mux, state);
+
+ mux->cached_state = ret < 0 ? MUX_CACHE_UNKNOWN : state;
+
+ return ret;
+}
+
+/**
+ * mux_chip_register() - Register a mux-chip, thus readying the controllers
+ * for use.
+ * @mux_chip: The mux-chip to register.
+ *
+ * Do not retry registration of the same mux-chip on failure. You should
+ * instead put it away with mux_chip_free() and allocate a new one, if you
+ * for some reason would like to retry registration.
+ *
+ * Return: Zero on success or a negative errno on error.
+ */
+int mux_chip_register(struct mux_chip *mux_chip)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < mux_chip->controllers; ++i) {
+ struct mux_control *mux = &mux_chip->mux[i];
+
+ if (mux->idle_state == mux->cached_state)
+ continue;
+
+ ret = mux_control_set(mux, mux->idle_state);
+ if (ret < 0) {
+ dev_err(&mux_chip->dev, "unable to set idle state\n");
+ return ret;
+ }
+ }
+
+ ret = device_add(&mux_chip->dev);
+ if (ret < 0)
+ dev_err(&mux_chip->dev,
+ "device_add failed in %s: %d\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mux_chip_register);
+
+/**
+ * mux_chip_unregister() - Take the mux-chip off-line.
+ * @mux_chip: The mux-chip to unregister.
+ *
+ * mux_chip_unregister() reverses the effects of mux_chip_register().
+ * But not completely; you should not try to call mux_chip_register()
+ * on a mux-chip that has been registered before.
+ */
+void mux_chip_unregister(struct mux_chip *mux_chip)
+{
+ device_del(&mux_chip->dev);
+}
+EXPORT_SYMBOL_GPL(mux_chip_unregister);
+
+/**
+ * mux_chip_free() - Free the mux-chip for good.
+ * @mux_chip: The mux-chip to free.
+ *
+ * mux_chip_free() reverses the effects of mux_chip_alloc().
+ */
+void mux_chip_free(struct mux_chip *mux_chip)
+{
+ if (!mux_chip)
+ return;
+
+ put_device(&mux_chip->dev);
+}
+EXPORT_SYMBOL_GPL(mux_chip_free);
+
+static void devm_mux_chip_release(struct device *dev, void *res)
+{
+ struct mux_chip *mux_chip = *(struct mux_chip **)res;
+
+ mux_chip_free(mux_chip);
+}
+
+/**
+ * devm_mux_chip_alloc() - Resource-managed version of mux_chip_alloc().
+ * @dev: The parent device implementing the mux interface.
+ * @controllers: The number of mux controllers to allocate for this chip.
+ * @sizeof_priv: Size of extra memory area for private use by the caller.
+ *
+ * See mux_chip_alloc() for more details.
+ *
+ * Return: A pointer to the new mux-chip, or an ERR_PTR with a negative errno.
+ */
+struct mux_chip *devm_mux_chip_alloc(struct device *dev,
+ unsigned int controllers,
+ size_t sizeof_priv)
+{
+ struct mux_chip **ptr, *mux_chip;
+
+ ptr = devres_alloc(devm_mux_chip_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ mux_chip = mux_chip_alloc(dev, controllers, sizeof_priv);
+ if (IS_ERR(mux_chip)) {
+ devres_free(ptr);
+ return mux_chip;
+ }
+
+ *ptr = mux_chip;
+ devres_add(dev, ptr);
+
+ return mux_chip;
+}
+EXPORT_SYMBOL_GPL(devm_mux_chip_alloc);
+
+static void devm_mux_chip_reg_release(struct device *dev, void *res)
+{
+ struct mux_chip *mux_chip = *(struct mux_chip **)res;
+
+ mux_chip_unregister(mux_chip);
+}
+
+/**
+ * devm_mux_chip_register() - Resource-managed version of mux_chip_register().
+ * @dev: The parent device implementing the mux interface.
+ * @mux_chip: The mux-chip to register.
+ *
+ * See mux_chip_register() for more details.
+ *
+ * Return: Zero on success or a negative errno on error.
+ */
+int devm_mux_chip_register(struct device *dev,
+ struct mux_chip *mux_chip)
+{
+ struct mux_chip **ptr;
+ int res;
+
+ ptr = devres_alloc(devm_mux_chip_reg_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ res = mux_chip_register(mux_chip);
+ if (res) {
+ devres_free(ptr);
+ return res;
+ }
+
+ *ptr = mux_chip;
+ devres_add(dev, ptr);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(devm_mux_chip_register);
+
+/**
+ * mux_control_states() - Query the number of multiplexer states.
+ * @mux: The mux-control to query.
+ *
+ * Return: The number of multiplexer states.
+ */
+unsigned int mux_control_states(struct mux_control *mux)
+{
+ return mux->states;
+}
+EXPORT_SYMBOL_GPL(mux_control_states);
+
+/*
+ * The mux->lock must be down when calling this function.
+ */
+static int __mux_control_select(struct mux_control *mux, int state)
+{
+ int ret;
+
+ if (WARN_ON(state < 0 || state >= mux->states))
+ return -EINVAL;
+
+ if (mux->cached_state == state)
+ return 0;
+
+ ret = mux_control_set(mux, state);
+ if (ret >= 0)
+ return 0;
+
+ /* The mux update failed, try to revert if appropriate... */
+ if (mux->idle_state != MUX_IDLE_AS_IS)
+ mux_control_set(mux, mux->idle_state);
+
+ return ret;
+}
+
+/**
+ * mux_control_select() - Select the given multiplexer state.
+ * @mux: The mux-control to request a change of state from.
+ * @state: The new requested state.
+ *
+ * On successfully selecting the mux-control state, it will be locked until
+ * there is a call to mux_control_deselect(). If the mux-control is already
+ * selected when mux_control_select() is called, the caller will be blocked
+ * until mux_control_deselect() is called (by someone else).
+ *
+ * Therefore, make sure to call mux_control_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_control_deselect() if mux_control_select() fails.
+ *
+ * Return: 0 when the mux-control state has the requested state or a negative
+ * errno on error.
+ */
+int mux_control_select(struct mux_control *mux, unsigned int state)
+{
+ int ret;
+
+ ret = down_killable(&mux->lock);
+ if (ret < 0)
+ return ret;
+
+ ret = __mux_control_select(mux, state);
+
+ if (ret < 0)
+ up(&mux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mux_control_select);
+
+/**
+ * mux_control_try_select() - Try to select the given multiplexer state.
+ * @mux: The mux-control to request a change of state from.
+ * @state: The new requested state.
+ *
+ * On successfully selecting the mux-control state, it will be locked until
+ * mux_control_deselect() is called.
+ *
+ * Therefore, make sure to call mux_control_deselect() when the operation is
+ * complete and the mux-control is free for others to use, but do not call
+ * mux_control_deselect() if mux_control_try_select() fails.
+ *
+ * Return: 0 when the mux-control state has the requested state or a negative
+ * errno on error. Specifically -EBUSY if the mux-control is contended.
+ */
+int mux_control_try_select(struct mux_control *mux, unsigned int state)
+{
+ int ret;
+
+ if (down_trylock(&mux->lock))
+ return -EBUSY;
+
+ ret = __mux_control_select(mux, state);
+
+ if (ret < 0)
+ up(&mux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mux_control_try_select);
+
+/**
+ * mux_control_deselect() - Deselect the previously selected multiplexer state.
+ * @mux: The mux-control to deselect.
+ *
+ * It is required that a single call is made to mux_control_deselect() for
+ * each and every successful call made to either of mux_control_select() or
+ * mux_control_try_select().
+ *
+ * Return: 0 on success and a negative errno on error. An error can only
+ * occur if the mux has an idle state. Note that even if an error occurs, the
+ * mux-control is unlocked and is thus free for the next access.
+ */
+int mux_control_deselect(struct mux_control *mux)
+{
+ int ret = 0;
+
+ if (mux->idle_state != MUX_IDLE_AS_IS &&
+ mux->idle_state != mux->cached_state)
+ ret = mux_control_set(mux, mux->idle_state);
+
+ up(&mux->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mux_control_deselect);
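
A minimal consumer sketch of the select/deselect contract documented above,
assuming the consumer API from <linux/mux/consumer.h>; the helper name is
hypothetical and the access in the middle stands in for whatever hardware
the mux gates:

	/* Illustrative only; "mux" was obtained with [devm_]mux_control_get(). */
	static int do_muxed_access(struct mux_control *mux, unsigned int state)
	{
		int ret;

		ret = mux_control_select(mux, state);
		if (ret < 0)
			return ret;	/* select failed: do NOT deselect */

		/* ...access the hardware behind the mux here... */

		return mux_control_deselect(mux);	/* always pair on success */
	}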
+
+static int of_dev_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&mux_class, NULL, np, of_dev_node_match);
+
+ return dev ? to_mux_chip(dev) : NULL;
+}
+
+/**
+ * mux_control_get() - Get the mux-control for a device.
+ * @dev: The device that needs a mux-control.
+ * @mux_name: The name identifying the mux-control.
+ *
+ * Return: A pointer to the mux-control, or an ERR_PTR with a negative errno.
+ */
+struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ struct mux_chip *mux_chip;
+ unsigned int controller;
+ int index = 0;
+ int ret;
+
+ if (mux_name) {
+ index = of_property_match_string(np, "mux-control-names",
+ mux_name);
+ if (index < 0) {
+ dev_err(dev, "mux controller '%s' not found\n",
+ mux_name);
+ return ERR_PTR(index);
+ }
+ }
+
+ ret = of_parse_phandle_with_args(np,
+ "mux-controls", "#mux-control-cells",
+ index, &args);
+ if (ret) {
+ dev_err(dev, "%s: failed to get mux-control %s(%i)\n",
+ np->full_name, mux_name ?: "", index);
+ return ERR_PTR(ret);
+ }
+
+ mux_chip = of_find_mux_chip_by_node(args.np);
+ of_node_put(args.np);
+ if (!mux_chip)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (args.args_count > 1 ||
+ (!args.args_count && (mux_chip->controllers > 1))) {
+ dev_err(dev, "%s: wrong #mux-control-cells for %s\n",
+ np->full_name, args.np->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ controller = 0;
+ if (args.args_count)
+ controller = args.args[0];
+
+ if (controller >= mux_chip->controllers) {
+ dev_err(dev, "%s: bad mux controller %u specified in %s\n",
+ np->full_name, controller, args.np->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ get_device(&mux_chip->dev);
+ return &mux_chip->mux[controller];
+}
+EXPORT_SYMBOL_GPL(mux_control_get);
+
+/**
+ * mux_control_put() - Put away the mux-control for good.
+ * @mux: The mux-control to put away.
+ *
+ * mux_control_put() reverses the effects of mux_control_get().
+ */
+void mux_control_put(struct mux_control *mux)
+{
+ put_device(&mux->chip->dev);
+}
+EXPORT_SYMBOL_GPL(mux_control_put);
+
+static void devm_mux_control_release(struct device *dev, void *res)
+{
+ struct mux_control *mux = *(struct mux_control **)res;
+
+ mux_control_put(mux);
+}
+
+/**
+ * devm_mux_control_get() - Get the mux-control for a device, with resource
+ * management.
+ * @dev: The device that needs a mux-control.
+ * @mux_name: The name identifying the mux-control.
+ *
+ * Return: Pointer to the mux-control, or an ERR_PTR with a negative errno.
+ */
+struct mux_control *devm_mux_control_get(struct device *dev,
+ const char *mux_name)
+{
+ struct mux_control **ptr, *mux;
+
+ ptr = devres_alloc(devm_mux_control_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ mux = mux_control_get(dev, mux_name);
+ if (IS_ERR(mux)) {
+ devres_free(ptr);
+ return mux;
+ }
+
+ *ptr = mux;
+ devres_add(dev, ptr);
+
+ return mux;
+}
+EXPORT_SYMBOL_GPL(devm_mux_control_get);
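
A hedged sketch of a consumer probe using the managed getter; the consumer
driver and the "adc" control name are assumptions for illustration:

	static int consumer_probe(struct platform_device *pdev)
	{
		struct mux_control *mux;

		mux = devm_mux_control_get(&pdev->dev, "adc");
		if (IS_ERR(mux))
			return PTR_ERR(mux);	/* may be -EPROBE_DEFER */

		/* mux_control_put() happens automatically on driver unbind. */
		return 0;
	}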
+
+/*
+ * Using subsys_initcall instead of module_init here to try to ensure - for
+ * the non-modular case - that the subsystem is initialized when mux consumers
+ * and mux controllers start to use it.
+ * For the modular case, the ordering is ensured with module dependencies.
+ */
+subsys_initcall(mux_init);
+module_exit(mux_exit);
+
+MODULE_DESCRIPTION("Multiplexer subsystem");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mux/mux-gpio.c b/drivers/mux/mux-gpio.c
new file mode 100644
index 000000000000..468bf1709606
--- /dev/null
+++ b/drivers/mux/mux-gpio.c
@@ -0,0 +1,114 @@
+/*
+ * GPIO-controlled multiplexer driver
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+struct mux_gpio {
+ struct gpio_descs *gpios;
+ int *val;
+};
+
+static int mux_gpio_set(struct mux_control *mux, int state)
+{
+ struct mux_gpio *mux_gpio = mux_chip_priv(mux->chip);
+ int i;
+
+ for (i = 0; i < mux_gpio->gpios->ndescs; i++)
+ mux_gpio->val[i] = (state >> i) & 1;
+
+ gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
+ mux_gpio->gpios->desc,
+ mux_gpio->val);
+
+ return 0;
+}
+
+static const struct mux_control_ops mux_gpio_ops = {
+ .set = mux_gpio_set,
+};
+
+static const struct of_device_id mux_gpio_dt_ids[] = {
+ { .compatible = "gpio-mux", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mux_gpio_dt_ids);
+
+static int mux_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mux_chip *mux_chip;
+ struct mux_gpio *mux_gpio;
+ int pins;
+ s32 idle_state;
+ int ret;
+
+ pins = gpiod_count(dev, "mux");
+ if (pins < 0)
+ return pins;
+
+ mux_chip = devm_mux_chip_alloc(dev, 1, sizeof(*mux_gpio) +
+ pins * sizeof(*mux_gpio->val));
+ if (IS_ERR(mux_chip))
+ return PTR_ERR(mux_chip);
+
+ mux_gpio = mux_chip_priv(mux_chip);
+ mux_gpio->val = (int *)(mux_gpio + 1);
+ mux_chip->ops = &mux_gpio_ops;
+
+ mux_gpio->gpios = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
+ if (IS_ERR(mux_gpio->gpios)) {
+ ret = PTR_ERR(mux_gpio->gpios);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get gpios\n");
+ return ret;
+ }
+ WARN_ON(pins != mux_gpio->gpios->ndescs);
+ mux_chip->mux->states = 1 << pins;
+
+ ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
+ if (ret >= 0 && idle_state != MUX_IDLE_AS_IS) {
+ if (idle_state < 0 || idle_state >= mux_chip->mux->states) {
+ dev_err(dev, "invalid idle-state %u\n", idle_state);
+ return -EINVAL;
+ }
+
+ mux_chip->mux->idle_state = idle_state;
+ }
+
+ ret = devm_mux_chip_register(dev, mux_chip);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "%u-way mux-controller registered\n",
+ mux_chip->mux->states);
+
+ return 0;
+}
+
+static struct platform_driver mux_gpio_driver = {
+ .driver = {
+ .name = "gpio-mux",
+ .of_match_table = of_match_ptr(mux_gpio_dt_ids),
+ },
+ .probe = mux_gpio_probe,
+};
+module_platform_driver(mux_gpio_driver);
+
+MODULE_DESCRIPTION("GPIO-controlled multiplexer driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mux/mux-mmio.c b/drivers/mux/mux-mmio.c
new file mode 100644
index 000000000000..37c1de359a70
--- /dev/null
+++ b/drivers/mux/mux-mmio.c
@@ -0,0 +1,141 @@
+/*
+ * MMIO register bitfield-controlled multiplexer driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+static int mux_mmio_set(struct mux_control *mux, int state)
+{
+ struct regmap_field **fields = mux_chip_priv(mux->chip);
+
+ return regmap_field_write(fields[mux_control_get_index(mux)], state);
+}
+
+static const struct mux_control_ops mux_mmio_ops = {
+ .set = mux_mmio_set,
+};
+
+static const struct of_device_id mux_mmio_dt_ids[] = {
+ { .compatible = "mmio-mux", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
+
+static int mux_mmio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap_field **fields;
+ struct mux_chip *mux_chip;
+ struct regmap *regmap;
+ int num_fields;
+ int ret;
+ int i;
+
+ regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "failed to get regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_count_u32_elems(np, "mux-reg-masks");
+ if (ret == 0 || ret % 2)
+ ret = -EINVAL;
+ if (ret < 0) {
+ dev_err(dev, "mux-reg-masks property missing or invalid: %d\n",
+ ret);
+ return ret;
+ }
+ num_fields = ret / 2;
+
+ mux_chip = devm_mux_chip_alloc(dev, num_fields, num_fields *
+ sizeof(*fields));
+ if (IS_ERR(mux_chip))
+ return PTR_ERR(mux_chip);
+
+ fields = mux_chip_priv(mux_chip);
+
+ for (i = 0; i < num_fields; i++) {
+ struct mux_control *mux = &mux_chip->mux[i];
+ struct reg_field field;
+ s32 idle_state = MUX_IDLE_AS_IS;
+ u32 reg, mask;
+ int bits;
+
+ ret = of_property_read_u32_index(np, "mux-reg-masks",
+ 2 * i, &reg);
+ if (!ret)
+ ret = of_property_read_u32_index(np, "mux-reg-masks",
+ 2 * i + 1, &mask);
+ if (ret < 0) {
+ dev_err(dev, "bitfield %d: failed to read mux-reg-masks property: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ field.reg = reg;
+ field.msb = fls(mask) - 1;
+ field.lsb = ffs(mask) - 1;
+
+ if (mask != GENMASK(field.msb, field.lsb)) {
+ dev_err(dev, "bitfield %d: invalid mask 0x%x\n",
+ i, mask);
+ return -EINVAL;
+ }
+
+ fields[i] = devm_regmap_field_alloc(dev, regmap, field);
+ if (IS_ERR(fields[i])) {
+ ret = PTR_ERR(fields[i]);
+ dev_err(dev, "bitfield %d: failed allocate: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ bits = 1 + field.msb - field.lsb;
+ mux->states = 1 << bits;
+
+ of_property_read_u32_index(np, "idle-states", i,
+ (u32 *)&idle_state);
+ if (idle_state != MUX_IDLE_AS_IS) {
+ if (idle_state < 0 || idle_state >= mux->states) {
+ dev_err(dev, "bitfield: %d: out of range idle state %d\n",
+ i, idle_state);
+ return -EINVAL;
+ }
+
+ mux->idle_state = idle_state;
+ }
+ }
+
+ mux_chip->ops = &mux_mmio_ops;
+
+ return devm_mux_chip_register(dev, mux_chip);
+}
+
+static struct platform_driver mux_mmio_driver = {
+ .driver = {
+ .name = "mmio-mux",
+ .of_match_table = of_match_ptr(mux_mmio_dt_ids),
+ },
+ .probe = mux_mmio_probe,
+};
+module_platform_driver(mux_mmio_driver);
+
+MODULE_DESCRIPTION("MMIO register bitfield-controlled multiplexer driver");
+MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 646cadbf1f93..3c56e3b2bd65 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -34,7 +34,7 @@
#define OTPC_CMD_READ 0x0
#define OTPC_CMD_OTP_PROG_ENABLE 0x2
#define OTPC_CMD_OTP_PROG_DISABLE 0x3
-#define OTPC_CMD_PROGRAM 0xA
+#define OTPC_CMD_PROGRAM 0x8
/* OTPC Status Bits */
#define OTPC_STAT_CMD_DONE BIT(1)
@@ -209,7 +209,7 @@ static int bcm_otpc_write(void *context, unsigned int offset, void *val,
set_command(priv->base, OTPC_CMD_PROGRAM);
set_cpu_address(priv->base, address++);
for (i = 0; i < priv->map->otpc_row_size; i++) {
- writel(*buf, priv->base + priv->map->data_r_offset[i]);
+ writel(*buf, priv->base + priv->map->data_w_offset[i]);
buf++;
bytes_written += sizeof(*buf);
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 8c830a80a648..4c49285168fb 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -287,9 +287,15 @@ static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
struct nvmem_cell *p;
+ mutex_lock(&nvmem_cells_mutex);
+
list_for_each_entry(p, &nvmem_cells, node)
- if (p && !strcmp(p->name, cell_id))
+ if (p && !strcmp(p->name, cell_id)) {
+ mutex_unlock(&nvmem_cells_mutex);
return p;
+ }
+
+ mutex_unlock(&nvmem_cells_mutex);
return NULL;
}
@@ -489,21 +495,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
rval = device_add(&nvmem->dev);
if (rval)
- goto out;
+ goto err_put_device;
if (config->compat) {
rval = nvmem_setup_compat(nvmem, config);
if (rval)
- goto out;
+ goto err_device_del;
}
if (config->cells)
nvmem_add_cells(nvmem, config);
return nvmem;
-out:
- ida_simple_remove(&nvmem_ida, nvmem->id);
- kfree(nvmem);
+
+err_device_del:
+ device_del(&nvmem->dev);
+err_put_device:
+ put_device(&nvmem->dev);
+
return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
@@ -529,6 +538,7 @@ int nvmem_unregister(struct nvmem_device *nvmem)
nvmem_device_remove_all_cells(nvmem);
device_del(&nvmem->dev);
+ put_device(&nvmem->dev);
return 0;
}
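
Both nvmem fixes above follow the standard driver-core teardown rule: before
device_add() has succeeded, put_device() alone releases the device (its
release callback frees the id and the memory); after a successful
device_add(), teardown is device_del() followed by put_device(). A condensed
sketch of the rule, not the literal nvmem code:

	device_initialize(dev);
	err = device_add(dev);
	if (err) {
		put_device(dev);	/* never device_del() here */
		return err;
	}
	/* ...normal operation... */
	device_del(dev);		/* undo device_add() */
	put_device(dev);		/* drop the final reference */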
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 423907bdd259..a0d4ede9b8fc 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -170,6 +170,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
+ .compatible = "rockchip,rk322x-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
.compatible = "rockchip,rk3288-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 5f3672153b12..0578d34eec3f 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -266,7 +266,7 @@ struct goldfish_pipe_dev {
unsigned char __iomem *base;
};
-struct goldfish_pipe_dev pipe_dev[1] = {};
+static struct goldfish_pipe_dev pipe_dev[1] = {};
static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 17225689e3f6..ae180dc929c9 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
#include "../../w1/slaves/w1_ds2760.h"
struct ds2760_device_info {
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 1b3b6fa89c28..8edd4aa5f475 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -21,7 +21,7 @@
#include <linux/power_supply.h>
#include <linux/idr.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
#include "../../w1/slaves/w1_ds2780.h"
/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index cc0149131f89..4400402f9ec5 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -19,7 +19,7 @@
#include <linux/power_supply.h>
#include <linux/idr.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
#include "../../w1/slaves/w1_ds2781.h"
/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 564a51abeece..4b29a7182d7b 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -2,9 +2,7 @@
# PPS support configuration
#
-menu "PPS support"
-
-config PPS
+menuconfig PPS
tristate "PPS support"
---help---
PPS (Pulse Per Second) is a special pulse provided by some GPS
@@ -20,10 +18,10 @@ config PPS
To compile this driver as a module, choose M here: the module
will be called pps_core.ko.
-if PPS
config PPS_DEBUG
bool "PPS debugging messages"
+ depends on PPS
help
Say Y here if you want the PPS support to produce a bunch of debug
messages to the system log. Select this if you are having a
@@ -31,17 +29,13 @@ config PPS_DEBUG
config NTP_PPS
bool "PPS kernel consumer support"
- depends on !NO_HZ_COMMON
+ depends on PPS && !NO_HZ_COMMON
help
This option adds support for direct in-kernel time
synchronization using an external PPS signal.
It doesn't work on tickless systems at the moment.
-endif
-
source drivers/pps/clients/Kconfig
source drivers/pps/generators/Kconfig
-
-endmenu
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 0c9f2805d076..efec021ce662 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -2,12 +2,12 @@
# PPS clients configuration
#
-if PPS
-
comment "PPS clients support"
+ depends on PPS
config PPS_CLIENT_KTIMER
tristate "Kernel timer client (Testing client, use for debug)"
+ depends on PPS
help
If you say yes here you get support for a PPS debugging client
which uses a kernel timer to generate the PPS signal.
@@ -37,5 +37,3 @@ config PPS_CLIENT_GPIO
GPIO. To be useful you must also register a platform device
specifying the GPIO pin and other options, usually in your board
setup.
-
-endif
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index e4c4f3dc0728..86b59378e71f 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -3,10 +3,11 @@
#
comment "PPS generators support"
+ depends on PPS
config PPS_GENERATOR_PARPORT
tristate "Parallel port PPS signal generator"
- depends on PARPORT && BROKEN
+ depends on PPS && PARPORT && BROKEN
help
If you say yes here you get support for a PPS signal generator which
utilizes STROBE pin of a parallel port to send PPS signals. It uses
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 5ec3a595dc7d..2afe3597982e 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -28,6 +28,7 @@
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
#define PMIC_ARB_VERSION_V2_MIN 0x20010000
+#define PMIC_ARB_VERSION_V3_MIN 0x30000000
#define PMIC_ARB_INT_EN 0x0004
/* PMIC Arbiter channel registers offsets */
@@ -58,10 +59,10 @@
/* Channel Status fields */
enum pmic_arb_chnl_status {
- PMIC_ARB_STATUS_DONE = (1 << 0),
- PMIC_ARB_STATUS_FAILURE = (1 << 1),
- PMIC_ARB_STATUS_DENIED = (1 << 2),
- PMIC_ARB_STATUS_DROPPED = (1 << 3),
+ PMIC_ARB_STATUS_DONE = BIT(0),
+ PMIC_ARB_STATUS_FAILURE = BIT(1),
+ PMIC_ARB_STATUS_DENIED = BIT(2),
+ PMIC_ARB_STATUS_DROPPED = BIT(3),
};
/* Command register fields */
@@ -96,10 +97,26 @@ enum pmic_arb_cmd_op_code {
/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
+#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+ ((((slave_id) & 0xF) << 28) | \
+ (((periph_id) & 0xFF) << 20) | \
+ (((irq_id) & 0x7) << 16) | \
+ (((apid) & 0x1FF) << 0))
+
+#define HWIRQ_SID(hwirq) (((hwirq) >> 28) & 0xF)
+#define HWIRQ_PER(hwirq) (((hwirq) >> 20) & 0xFF)
+#define HWIRQ_IRQ(hwirq) (((hwirq) >> 16) & 0x7)
+#define HWIRQ_APID(hwirq) (((hwirq) >> 0) & 0x1FF)
+
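+/*
+ * The HWIRQ() packing above and its HWIRQ_*() accessors are exact
+ * inverses; an illustrative round trip:
+ *
+ *	unsigned long hwirq = HWIRQ(0x2, 0x28, 0x3, 0x1f);
+ *
+ *	HWIRQ_SID(hwirq)  == 0x2	(bits 31:28)
+ *	HWIRQ_PER(hwirq)  == 0x28	(bits 27:20)
+ *	HWIRQ_IRQ(hwirq)  == 0x3	(bits 18:16)
+ *	HWIRQ_APID(hwirq) == 0x1f	(bits  8:0)
+ */
+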
struct pmic_arb_ver_ops;
+struct apid_data {
+ u16 ppid;
+ u8 owner;
+};
+
/**
- * spmi_pmic_arb_dev - SPMI PMIC Arbiter object
+ * spmi_pmic_arb - SPMI PMIC Arbiter object
*
* @rd_base: on v1 "core", on v2 "observer" register base off DT.
* @wr_base: on v1 "core", on v2 "chnls" register base off DT.
@@ -111,15 +128,15 @@ struct pmic_arb_ver_ops;
* @ee: the current Execution Environment
* @min_apid: minimum APID (used for bounding IRQ search)
* @max_apid: maximum APID
+ * @max_periph: maximum number of PMIC peripherals supported by HW.
* @mapping_table: in-memory copy of PPID -> APID mapping table.
* @domain: irq domain object for PMIC IRQ domain
* @spmic: SPMI controller object
- * @apid_to_ppid: in-memory copy of APID -> PPID mapping table.
* @ver_ops: version dependent operations.
- * @ppid_to_chan in-memory copy of PPID -> channel (APID) mapping table.
+ * @ppid_to_apid in-memory copy of PPID -> channel (APID) mapping table.
* v2 only.
*/
-struct spmi_pmic_arb_dev {
+struct spmi_pmic_arb {
void __iomem *rd_base;
void __iomem *wr_base;
void __iomem *intr;
@@ -132,19 +149,23 @@ struct spmi_pmic_arb_dev {
u8 ee;
u16 min_apid;
u16 max_apid;
+ u16 max_periph;
u32 *mapping_table;
DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
struct irq_domain *domain;
struct spmi_controller *spmic;
- u16 *apid_to_ppid;
const struct pmic_arb_ver_ops *ver_ops;
- u16 *ppid_to_chan;
- u16 last_channel;
+ u16 *ppid_to_apid;
+ u16 last_apid;
+ struct apid_data apid_data[PMIC_ARB_MAX_PERIPHS];
};
/**
* pmic_arb_ver: version dependent functionality.
*
+ * @ver_str: version string.
+ * @ppid_to_apid: finds the apid for a given ppid.
+ * @mode: returns the access rights of the specified PMIC peripheral.
* @non_data_cmd: on v1 issues an spmi non-data command.
* on v2 no HW support, returns -EOPNOTSUPP.
* @offset: on v1 offset of per-ee channel.
@@ -160,28 +181,33 @@ struct spmi_pmic_arb_dev {
* on v2 offset of SPMI_PIC_IRQ_CLEARn.
*/
struct pmic_arb_ver_ops {
+ const char *ver_str;
+ int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
+ u16 *apid);
+ int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
+ mode_t *mode);
/* spmi commands (read_cmd, write_cmd, cmd) functionality */
- int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
+ int (*offset)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
u32 *offset);
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
/* Interrupts controller functionality (offset of PIC registers) */
- u32 (*owner_acc_status)(u8 m, u8 n);
- u32 (*acc_enable)(u8 n);
- u32 (*irq_status)(u8 n);
- u32 (*irq_clear)(u8 n);
+ u32 (*owner_acc_status)(u8 m, u16 n);
+ u32 (*acc_enable)(u16 n);
+ u32 (*irq_status)(u16 n);
+ u32 (*irq_clear)(u16 n);
};
-static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
u32 offset, u32 val)
{
- writel_relaxed(val, dev->wr_base + offset);
+ writel_relaxed(val, pa->wr_base + offset);
}
-static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
+static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pa,
u32 offset, u32 val)
{
- writel_relaxed(val, dev->rd_base + offset);
+ writel_relaxed(val, pa->rd_base + offset);
}
/**
@@ -190,9 +216,10 @@ static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev,
* @reg: register's address
* @buf: output parameter, length must be bc + 1
*/
-static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
+static void pa_read_data(struct spmi_pmic_arb *pa, u8 *buf, u32 reg, u8 bc)
{
- u32 data = __raw_readl(dev->rd_base + reg);
+ u32 data = __raw_readl(pa->rd_base + reg);
+
memcpy(buf, &data, (bc & 3) + 1);
}
@@ -203,23 +230,24 @@ static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
* @buf: buffer to write. length must be bc + 1.
*/
static void
-pa_write_data(struct spmi_pmic_arb_dev *dev, const u8 *buf, u32 reg, u8 bc)
+pa_write_data(struct spmi_pmic_arb *pa, const u8 *buf, u32 reg, u8 bc)
{
u32 data = 0;
+
memcpy(&data, buf, (bc & 3) + 1);
- __raw_writel(data, dev->wr_base + reg);
+ pmic_arb_base_write(pa, reg, data);
}
static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
void __iomem *base, u8 sid, u16 addr)
{
- struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
u32 status = 0;
u32 timeout = PMIC_ARB_TIMEOUT_US;
u32 offset;
int rc;
- rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+ rc = pa->ver_ops->offset(pa, sid, addr, &offset);
if (rc)
return rc;
@@ -264,22 +292,22 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
static int
pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
- struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u32 cmd;
int rc;
u32 offset;
- rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+ rc = pa->ver_ops->offset(pa, sid, 0, &offset);
if (rc)
return rc;
cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
- raw_spin_lock_irqsave(&pmic_arb->lock, flags);
- pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0);
- raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, 0);
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
return rc;
}
@@ -293,7 +321,7 @@ pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid)
/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
- struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
@@ -301,23 +329,35 @@ static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
return -EINVAL;
- return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
+ return pa->ver_ops->non_data_cmd(ctrl, opc, sid);
}
static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, u8 *buf, size_t len)
{
- struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u8 bc = len - 1;
u32 cmd;
int rc;
u32 offset;
+ mode_t mode;
+
+ rc = pa->ver_ops->offset(pa, sid, addr, &offset);
+ if (rc)
+ return rc;
- rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+ rc = pa->ver_ops->mode(pa, sid, addr, &mode);
if (rc)
return rc;
+ if (!(mode & S_IRUSR)) {
+ dev_err(&pa->spmic->dev,
+ "error: impermissible read from peripheral sid:%d addr:0x%x\n",
+ sid, addr);
+ return -EPERM;
+ }
+
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
@@ -335,40 +375,51 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
- raw_spin_lock_irqsave(&pmic_arb->lock, flags);
- pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr);
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ pmic_arb_set_rd_cmd(pa, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pa->rd_base, sid, addr);
if (rc)
goto done;
- pa_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
+ pa_read_data(pa, buf, offset + PMIC_ARB_RDATA0,
min_t(u8, bc, 3));
if (bc > 3)
- pa_read_data(pmic_arb, buf + 4,
- offset + PMIC_ARB_RDATA1, bc - 4);
+ pa_read_data(pa, buf + 4, offset + PMIC_ARB_RDATA1, bc - 4);
done:
- raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
return rc;
}
static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u16 addr, const u8 *buf, size_t len)
{
- struct spmi_pmic_arb_dev *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
unsigned long flags;
u8 bc = len - 1;
u32 cmd;
int rc;
u32 offset;
+ mode_t mode;
+
+ rc = pa->ver_ops->offset(pa, sid, addr, &offset);
+ if (rc)
+ return rc;
- rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+ rc = pa->ver_ops->mode(pa, sid, addr, &mode);
if (rc)
return rc;
+ if (!(mode & S_IWUSR)) {
+ dev_err(&pa->spmic->dev,
+ "error: impermissible write to peripheral sid:%d addr:0x%x\n",
+ sid, addr);
+ return -EPERM;
+ }
+
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
"pmic-arb supports 1..%d bytes per trans, but:%zu requested",
@@ -388,20 +439,18 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ cmd = pa->ver_ops->fmt_cmd(opc, sid, addr, bc);
/* Write data to FIFOs */
- raw_spin_lock_irqsave(&pmic_arb->lock, flags);
- pa_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
- min_t(u8, bc, 3));
+ raw_spin_lock_irqsave(&pa->lock, flags);
+ pa_write_data(pa, buf, offset + PMIC_ARB_WDATA0, min_t(u8, bc, 3));
if (bc > 3)
- pa_write_data(pmic_arb, buf + 4,
- offset + PMIC_ARB_WDATA1, bc - 4);
+ pa_write_data(pa, buf + 4, offset + PMIC_ARB_WDATA1, bc - 4);
/* Start the transaction */
- pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr);
- raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ pmic_arb_base_write(pa, offset + PMIC_ARB_CMD, cmd);
+ rc = pmic_arb_wait_for_done(ctrl, pa->wr_base, sid, addr);
+ raw_spin_unlock_irqrestore(&pa->lock, flags);
return rc;
}
@@ -427,9 +476,9 @@ struct spmi_pmic_arb_qpnpint_type {
static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
size_t len)
{
- struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
- u8 sid = d->hwirq >> 24;
- u8 per = d->hwirq >> 16;
+ struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
(per << 8) + reg, buf, len))
@@ -440,9 +489,9 @@ static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
- struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
- u8 sid = d->hwirq >> 24;
- u8 per = d->hwirq >> 16;
+ struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
(per << 8) + reg, buf, len))
@@ -451,33 +500,58 @@ static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
d->irq);
}
-static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
+static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
+{
+ u16 ppid = pa->apid_data[apid].ppid;
+ u8 sid = ppid >> 8;
+ u8 per = ppid & 0xFF;
+ u8 irq_mask = BIT(id);
+
+ writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));
+
+ if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ (per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed to ack irq_mask = 0x%x for ppid = %x\n",
+ irq_mask, ppid);
+
+ if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
+ (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
+ dev_err_ratelimited(&pa->spmic->dev,
+ "failed to ack irq_mask = 0x%x for ppid = %x\n",
+ irq_mask, ppid);
+}
+
+static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
{
unsigned int irq;
u32 status;
int id;
+ u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
+ u8 per = pa->apid_data[apid].ppid & 0xFF;
status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
while (status) {
id = ffs(status) - 1;
- status &= ~(1 << id);
- irq = irq_find_mapping(pa->domain,
- pa->apid_to_ppid[apid] << 16
- | id << 8
- | apid);
+ status &= ~BIT(id);
+ irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
+ if (irq == 0) {
+ cleanup_irq(pa, apid, id);
+ continue;
+ }
generic_handle_irq(irq);
}
}
static void pmic_arb_chained_irq(struct irq_desc *desc)
{
- struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
+ struct spmi_pmic_arb *pa = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *intr = pa->intr;
int first = pa->min_apid >> 5;
int last = pa->max_apid >> 5;
- u32 status;
- int i, id;
+ u32 status, enable;
+ int i, id, apid;
chained_irq_enter(chip, desc);
@@ -486,8 +560,12 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
pa->ver_ops->owner_acc_status(pa->ee, i));
while (status) {
id = ffs(status) - 1;
- status &= ~(1 << id);
- periph_interrupt(pa, id + i * 32);
+ status &= ~BIT(id);
+ apid = id + i * 32;
+ enable = readl_relaxed(intr +
+ pa->ver_ops->acc_enable(apid));
+ if (enable & SPMI_PIC_ACC_ENABLE_BIT)
+ periph_interrupt(pa, apid);
}
}
@@ -496,100 +574,81 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
static void qpnpint_irq_ack(struct irq_data *d)
{
- struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
- u8 irq = d->hwirq >> 8;
- u8 apid = d->hwirq;
- unsigned long flags;
+ struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
u8 data;
- raw_spin_lock_irqsave(&pa->lock, flags);
- writel_relaxed(1 << irq, pa->intr + pa->ver_ops->irq_clear(apid));
- raw_spin_unlock_irqrestore(&pa->lock, flags);
+ writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
- data = 1 << irq;
+ data = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
}
static void qpnpint_irq_mask(struct irq_data *d)
{
- struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
- u8 irq = d->hwirq >> 8;
- u8 apid = d->hwirq;
- unsigned long flags;
- u32 status;
- u8 data;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 data = BIT(irq);
- raw_spin_lock_irqsave(&pa->lock, flags);
- status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
- if (status & SPMI_PIC_ACC_ENABLE_BIT) {
- status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
- writel_relaxed(status, pa->intr +
- pa->ver_ops->acc_enable(apid));
- }
- raw_spin_unlock_irqrestore(&pa->lock, flags);
-
- data = 1 << irq;
qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
}
static void qpnpint_irq_unmask(struct irq_data *d)
{
- struct spmi_pmic_arb_dev *pa = irq_data_get_irq_chip_data(d);
- u8 irq = d->hwirq >> 8;
- u8 apid = d->hwirq;
- unsigned long flags;
- u32 status;
- u8 data;
-
- raw_spin_lock_irqsave(&pa->lock, flags);
- status = readl_relaxed(pa->intr + pa->ver_ops->acc_enable(apid));
- if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
- writel_relaxed(status | SPMI_PIC_ACC_ENABLE_BIT,
- pa->intr + pa->ver_ops->acc_enable(apid));
+ struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
+ u8 buf[2];
+
+ writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
+ pa->intr + pa->ver_ops->acc_enable(apid));
+
+ qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1);
+ if (!(buf[0] & BIT(irq))) {
+ /*
+ * Since the interrupt is currently disabled, write to both the
+ * LATCHED_CLR and EN_SET registers so that a spurious interrupt
+ * cannot be triggered when the interrupt is enabled
+ */
+ buf[0] = BIT(irq);
+ buf[1] = BIT(irq);
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2);
}
- raw_spin_unlock_irqrestore(&pa->lock, flags);
-
- data = 1 << irq;
- qpnpint_spmi_write(d, QPNPINT_REG_EN_SET, &data, 1);
-}
-
-static void qpnpint_irq_enable(struct irq_data *d)
-{
- u8 irq = d->hwirq >> 8;
- u8 data;
-
- qpnpint_irq_unmask(d);
-
- data = 1 << irq;
- qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1);
}
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct spmi_pmic_arb_qpnpint_type type;
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 bit_mask_irq = BIT(irq);
qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- type.type |= 1 << irq;
+ type.type |= bit_mask_irq;
if (flow_type & IRQF_TRIGGER_RISING)
- type.polarity_high |= 1 << irq;
+ type.polarity_high |= bit_mask_irq;
if (flow_type & IRQF_TRIGGER_FALLING)
- type.polarity_low |= 1 << irq;
+ type.polarity_low |= bit_mask_irq;
} else {
if ((flow_type & (IRQF_TRIGGER_HIGH)) &&
(flow_type & (IRQF_TRIGGER_LOW)))
return -EINVAL;
- type.type &= ~(1 << irq); /* level trig */
+ type.type &= ~bit_mask_irq; /* level trig */
if (flow_type & IRQF_TRIGGER_HIGH)
- type.polarity_high |= 1 << irq;
+ type.polarity_high |= bit_mask_irq;
else
- type.polarity_low |= 1 << irq;
+ type.polarity_low |= bit_mask_irq;
}
qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
return 0;
}
@@ -597,7 +656,7 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool *state)
{
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
u8 status = 0;
if (which != IRQCHIP_STATE_LINE_LEVEL)
@@ -611,7 +670,6 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
static struct irq_chip pmic_arb_irqchip = {
.name = "pmic_arb",
- .irq_enable = qpnpint_irq_enable,
.irq_ack = qpnpint_irq_ack,
.irq_mask = qpnpint_irq_mask,
.irq_unmask = qpnpint_irq_unmask,
@@ -621,48 +679,6 @@ static struct irq_chip pmic_arb_irqchip = {
| IRQCHIP_SKIP_SET_WAKE,
};
-struct spmi_pmic_arb_irq_spec {
- unsigned slave:4;
- unsigned per:8;
- unsigned irq:3;
-};
-
-static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
- struct spmi_pmic_arb_irq_spec *spec,
- u8 *apid)
-{
- u16 ppid = spec->slave << 8 | spec->per;
- u32 *mapping_table = pa->mapping_table;
- int index = 0, i;
- u32 data;
-
- for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
- if (!test_and_set_bit(index, pa->mapping_table_valid))
- mapping_table[index] = readl_relaxed(pa->cnfg +
- SPMI_MAPPING_TABLE_REG(index));
-
- data = mapping_table[index];
-
- if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
- if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
- index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
- } else {
- *apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
- return 0;
- }
- } else {
- if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
- index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
- } else {
- *apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
- return 0;
- }
- }
- }
-
- return -ENODEV;
-}
-
static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
struct device_node *controller,
const u32 *intspec,
@@ -670,10 +686,9 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
unsigned long *out_hwirq,
unsigned int *out_type)
{
- struct spmi_pmic_arb_dev *pa = d->host_data;
- struct spmi_pmic_arb_irq_spec spec;
- int err;
- u8 apid;
+ struct spmi_pmic_arb *pa = d->host_data;
+ int rc;
+ u16 apid;
dev_dbg(&pa->spmic->dev,
"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
@@ -686,15 +701,14 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7)
return -EINVAL;
- spec.slave = intspec[0];
- spec.per = intspec[1];
- spec.irq = intspec[2];
-
- err = search_mapping_table(pa, &spec, &apid);
- if (err)
- return err;
-
- pa->apid_to_ppid[apid] = spec.slave << 8 | spec.per;
+ rc = pa->ver_ops->ppid_to_apid(pa, intspec[0],
+ (intspec[1] << 8), &apid);
+ if (rc < 0) {
+ dev_err(&pa->spmic->dev,
+ "failed to xlate sid = 0x%x, periph = 0x%x, irq = %x rc = %d\n",
+ intspec[0], intspec[1], intspec[2], rc);
+ return rc;
+ }
/* Keep track of {max,min}_apid for bounding search during interrupt */
if (apid > pa->max_apid)
@@ -702,10 +716,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
if (apid < pa->min_apid)
pa->min_apid = apid;
- *out_hwirq = spec.slave << 24
- | spec.per << 16
- | spec.irq << 8
- | apid;
+ *out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
@@ -717,7 +728,7 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
unsigned int virq,
irq_hw_number_t hwirq)
{
- struct spmi_pmic_arb_dev *pa = d->host_data;
+ struct spmi_pmic_arb *pa = d->host_data;
dev_dbg(&pa->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq);
@@ -727,26 +738,85 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
return 0;
}
+static int
+pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
+{
+ u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
+ u32 *mapping_table = pa->mapping_table;
+ int index = 0, i;
+ u16 apid_valid;
+ u32 data;
+
+ apid_valid = pa->ppid_to_apid[ppid];
+ if (apid_valid & PMIC_ARB_CHAN_VALID) {
+ *apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+ return 0;
+ }
+
+ for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+ if (!test_and_set_bit(index, pa->mapping_table_valid))
+ mapping_table[index] = readl_relaxed(pa->cnfg +
+ SPMI_MAPPING_TABLE_REG(index));
+
+ data = mapping_table[index];
+
+ if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) {
+ if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
+ index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+ } else {
+ *apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
+ pa->ppid_to_apid[ppid]
+ = *apid | PMIC_ARB_CHAN_VALID;
+ pa->apid_data[*apid].ppid = ppid;
+ return 0;
+ }
+ } else {
+ if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
+ index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+ } else {
+ *apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
+ pa->ppid_to_apid[ppid]
+ = *apid | PMIC_ARB_CHAN_VALID;
+ pa->apid_data[*apid].ppid = ppid;
+ return 0;
+ }
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int
+pmic_arb_mode_v1_v3(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+ *mode = S_IRUSR | S_IWUSR;
+ return 0;
+}
+
/* v1 offset per ee */
static int
-pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
+pmic_arb_offset_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
{
*offset = 0x800 + 0x80 * pa->channel;
return 0;
}
-static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
{
u32 regval, offset;
- u16 chan;
+ u16 apid;
u16 id;
/*
* PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
- * ppid_to_chan is an in-memory invert of that table.
+ * ppid_to_apid is an in-memory invert of that table.
*/
- for (chan = pa->last_channel; ; chan++) {
- offset = PMIC_ARB_REG_CHNL(chan);
+ for (apid = pa->last_apid; apid < pa->max_periph; apid++) {
+ regval = readl_relaxed(pa->cnfg +
+ SPMI_OWNERSHIP_TABLE_REG(apid));
+ pa->apid_data[apid].owner = SPMI_OWNERSHIP_PERIPH2OWNER(regval);
+
+ offset = PMIC_ARB_REG_CHNL(apid);
if (offset >= pa->core_size)
break;
@@ -755,33 +825,65 @@ static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
continue;
id = (regval >> 8) & PMIC_ARB_PPID_MASK;
- pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+ pa->ppid_to_apid[id] = apid | PMIC_ARB_CHAN_VALID;
+ pa->apid_data[apid].ppid = id;
if (id == ppid) {
- chan |= PMIC_ARB_CHAN_VALID;
+ apid |= PMIC_ARB_CHAN_VALID;
break;
}
}
- pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+ pa->last_apid = apid & ~PMIC_ARB_CHAN_VALID;
- return chan;
+ return apid;
}
-/* v2 offset per ppid (chan) and per ee */
static int
-pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
+pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
{
u16 ppid = (sid << 8) | (addr >> 8);
- u16 chan;
+ u16 apid_valid;
- chan = pa->ppid_to_chan[ppid];
- if (!(chan & PMIC_ARB_CHAN_VALID))
- chan = pmic_arb_find_chan(pa, ppid);
- if (!(chan & PMIC_ARB_CHAN_VALID))
+ apid_valid = pa->ppid_to_apid[ppid];
+ if (!(apid_valid & PMIC_ARB_CHAN_VALID))
+ apid_valid = pmic_arb_find_apid(pa, ppid);
+ if (!(apid_valid & PMIC_ARB_CHAN_VALID))
return -ENODEV;
- chan &= ~PMIC_ARB_CHAN_VALID;
- *offset = 0x1000 * pa->ee + 0x8000 * chan;
+ *apid = (apid_valid & ~PMIC_ARB_CHAN_VALID);
+ return 0;
+}
+
+static int
+pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
+{
+ u16 apid;
+ int rc;
+
+ rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+ if (rc < 0)
+ return rc;
+
+ *mode = 0;
+ *mode |= S_IRUSR;
+
+ if (pa->ee == pa->apid_data[apid].owner)
+ *mode |= S_IWUSR;
+ return 0;
+}
+
+/* v2 offset per ppid and per ee */
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
+{
+ u16 apid;
+ int rc;
+
+ rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
+ if (rc < 0)
+ return rc;
+
+ *offset = 0x1000 * pa->ee + 0x8000 * apid;
return 0;
}
@@ -795,47 +897,55 @@ static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}
-static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
{
return 0x20 * m + 0x4 * n;
}
-static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
{
return 0x100000 + 0x1000 * m + 0x4 * n;
}
-static u32 pmic_arb_acc_enable_v1(u8 n)
+static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
+{
+ return 0x200000 + 0x1000 * m + 0x4 * n;
+}
+
+static u32 pmic_arb_acc_enable_v1(u16 n)
{
return 0x200 + 0x4 * n;
}
-static u32 pmic_arb_acc_enable_v2(u8 n)
+static u32 pmic_arb_acc_enable_v2(u16 n)
{
return 0x1000 * n;
}
-static u32 pmic_arb_irq_status_v1(u8 n)
+static u32 pmic_arb_irq_status_v1(u16 n)
{
return 0x600 + 0x4 * n;
}
-static u32 pmic_arb_irq_status_v2(u8 n)
+static u32 pmic_arb_irq_status_v2(u16 n)
{
return 0x4 + 0x1000 * n;
}
-static u32 pmic_arb_irq_clear_v1(u8 n)
+static u32 pmic_arb_irq_clear_v1(u16 n)
{
return 0xA00 + 0x4 * n;
}
-static u32 pmic_arb_irq_clear_v2(u8 n)
+static u32 pmic_arb_irq_clear_v2(u16 n)
{
return 0x8 + 0x1000 * n;
}
static const struct pmic_arb_ver_ops pmic_arb_v1 = {
+ .ver_str = "v1",
+ .ppid_to_apid = pmic_arb_ppid_to_apid_v1,
+ .mode = pmic_arb_mode_v1_v3,
.non_data_cmd = pmic_arb_non_data_cmd_v1,
.offset = pmic_arb_offset_v1,
.fmt_cmd = pmic_arb_fmt_cmd_v1,
@@ -846,6 +956,9 @@ static const struct pmic_arb_ver_ops pmic_arb_v1 = {
};
static const struct pmic_arb_ver_ops pmic_arb_v2 = {
+ .ver_str = "v2",
+ .ppid_to_apid = pmic_arb_ppid_to_apid_v2,
+ .mode = pmic_arb_mode_v2,
.non_data_cmd = pmic_arb_non_data_cmd_v2,
.offset = pmic_arb_offset_v2,
.fmt_cmd = pmic_arb_fmt_cmd_v2,
@@ -855,6 +968,19 @@ static const struct pmic_arb_ver_ops pmic_arb_v2 = {
.irq_clear = pmic_arb_irq_clear_v2,
};
+static const struct pmic_arb_ver_ops pmic_arb_v3 = {
+ .ver_str = "v3",
+ .ppid_to_apid = pmic_arb_ppid_to_apid_v2,
+ .mode = pmic_arb_mode_v1_v3,
+ .non_data_cmd = pmic_arb_non_data_cmd_v2,
+ .offset = pmic_arb_offset_v2,
+ .fmt_cmd = pmic_arb_fmt_cmd_v2,
+ .owner_acc_status = pmic_arb_owner_acc_status_v3,
+ .acc_enable = pmic_arb_acc_enable_v2,
+ .irq_status = pmic_arb_irq_status_v2,
+ .irq_clear = pmic_arb_irq_clear_v2,
+};
+
static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
.map = qpnpint_irq_domain_map,
.xlate = qpnpint_irq_domain_dt_translate,
@@ -862,13 +988,12 @@ static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
- struct spmi_pmic_arb_dev *pa;
+ struct spmi_pmic_arb *pa;
struct spmi_controller *ctrl;
struct resource *res;
void __iomem *core;
u32 channel, ee, hw_ver;
int err;
- bool is_v1;
ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
if (!ctrl)
@@ -879,6 +1004,12 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
pa->core_size = resource_size(res);
+ if (pa->core_size <= 0x800) {
+ dev_err(&pdev->dev, "core_size is smaller than 0x800. Failing Probe\n");
+ err = -EINVAL;
+ goto err_put_ctrl;
+ }
+
core = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(core)) {
err = PTR_ERR(core);
@@ -886,18 +1017,21 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
}
hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
- is_v1 = (hw_ver < PMIC_ARB_VERSION_V2_MIN);
-
- dev_info(&ctrl->dev, "PMIC Arb Version-%d (0x%x)\n", (is_v1 ? 1 : 2),
- hw_ver);
- if (is_v1) {
+ if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
pa->ver_ops = &pmic_arb_v1;
pa->wr_base = core;
pa->rd_base = core;
} else {
pa->core = core;
- pa->ver_ops = &pmic_arb_v2;
+
+ if (hw_ver < PMIC_ARB_VERSION_V3_MIN)
+ pa->ver_ops = &pmic_arb_v2;
+ else
+ pa->ver_ops = &pmic_arb_v3;
+
+ /* the apid to ppid table starts at PMIC_ARB_REG_CHNL(0) */
+ pa->max_periph = (pa->core_size - PMIC_ARB_REG_CHNL(0)) / 4;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"obsrvr");
@@ -915,16 +1049,19 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
- pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+ pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
PMIC_ARB_MAX_PPID,
- sizeof(*pa->ppid_to_chan),
+ sizeof(*pa->ppid_to_apid),
GFP_KERNEL);
- if (!pa->ppid_to_chan) {
+ if (!pa->ppid_to_apid) {
err = -ENOMEM;
goto err_put_ctrl;
}
}
+ dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
+ pa->ver_ops->ver_str, hw_ver);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
pa->intr = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(pa->intr)) {
@@ -974,14 +1111,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
pa->ee = ee;
- pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
- sizeof(*pa->apid_to_ppid),
- GFP_KERNEL);
- if (!pa->apid_to_ppid) {
- err = -ENOMEM;
- goto err_put_ctrl;
- }
-
pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
sizeof(*pa->mapping_table), GFP_KERNEL);
if (!pa->mapping_table) {
@@ -1011,6 +1140,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
}
irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
+ enable_irq_wake(pa->irq);
err = spmi_controller_add(ctrl);
if (err)
@@ -1029,7 +1159,7 @@ err_put_ctrl:
static int spmi_pmic_arb_remove(struct platform_device *pdev)
{
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
- struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
+ struct spmi_pmic_arb *pa = spmi_controller_get_drvdata(ctrl);
spmi_controller_remove(ctrl);
irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
irq_domain_remove(pa->domain);
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index d35db16aa43f..f4869c38c7e4 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,15 +1,16 @@
menuconfig THUNDERBOLT
- tristate "Thunderbolt support for Apple devices"
+ tristate "Thunderbolt support"
depends on PCI
depends on X86 || COMPILE_TEST
select APPLE_PROPERTIES if EFI_STUB && X86
select CRC32
+ select CRYPTO
+ select CRYPTO_HASH
+ select NVMEM
help
- Cactus Ridge Thunderbolt Controller driver
- This driver is required if you want to hotplug Thunderbolt devices on
- Apple hardware.
-
- Device chaining is currently not supported.
+ Thunderbolt Controller driver. This driver is required if you
+ want to hotplug Thunderbolt devices on Apple hardware or on PCs
+ with Intel Falcon Ridge or newer.
To compile this driver as a module, choose M here. The module will be
called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 5d1053cdfa54..4900febc6c8a 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-
+thunderbolt-objs += domain.o dma_port.o icm.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index a7b47e7cddbd..38bc27a5ce4f 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -9,6 +9,8 @@
#include "tb.h"
+#define CAP_OFFSET_MAX 0xff
+#define VSE_CAP_OFFSET_MAX 0xffff
struct tb_cap_any {
union {
@@ -18,99 +20,110 @@ struct tb_cap_any {
};
} __packed;
-static bool tb_cap_is_basic(struct tb_cap_any *cap)
-{
- /* basic.cap is u8. This checks only the lower 8 bit of cap. */
- return cap->basic.cap != 5;
-}
-
-static bool tb_cap_is_long(struct tb_cap_any *cap)
+/**
+ * tb_port_find_cap() - Find port capability
+ * @port: Port to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns the offset to the start of the capability, or %-ENOENT if no
+ * such capability was found. A negative errno is returned if there was
+ * an error.
+ */
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
- return !tb_cap_is_basic(cap)
- && cap->extended_short.next == 0
- && cap->extended_short.length == 0;
-}
+ u32 offset;
-static enum tb_cap tb_cap(struct tb_cap_any *cap)
-{
- if (tb_cap_is_basic(cap))
- return cap->basic.cap;
+ /*
+ * DP out adapters claim to implement TMU capability but in
+ * reality they do not so we hard code the adapter specific
+ * capability offset here.
+ */
+ if (port->config.type == TB_TYPE_DP_HDMI_OUT)
+ offset = 0x39;
else
- /* extended_short/long have cap at the same offset. */
- return cap->extended_short.cap;
+ offset = 0x1;
+
+ do {
+ struct tb_cap_any header;
+ int ret;
+
+ ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+ if (ret)
+ return ret;
+
+ if (header.basic.cap == cap)
+ return offset;
+
+ offset = header.basic.next;
+ } while (offset);
+
+ return -ENOENT;
}
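
A caller sketch for the lookup above; TB_PORT_CAP_PHY is one of the
enum tb_port_cap values and is used here only as an assumed example:

	int offset = tb_port_find_cap(port, TB_PORT_CAP_PHY);

	if (offset < 0)
		return offset;	/* -ENOENT if absent, other errno on error */
	/* The capability's registers start at "offset" in port config space. */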
-static u32 tb_cap_next(struct tb_cap_any *cap, u32 offset)
+static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
- int next;
- if (offset == 1) {
- /*
- * The first pointer is part of the switch header and always
- * a simple pointer.
- */
- next = cap->basic.next;
- } else {
- /*
- * Somehow Intel decided to use 3 different types of capability
- * headers. It is not like anyone could have predicted that
- * single byte offsets are not enough...
- */
- if (tb_cap_is_basic(cap))
- next = cap->basic.next;
- else if (!tb_cap_is_long(cap))
- next = cap->extended_short.next;
- else
- next = cap->extended_long.next;
+ int offset = sw->config.first_cap_offset;
+
+ while (offset > 0 && offset < CAP_OFFSET_MAX) {
+ struct tb_cap_any header;
+ int ret;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if (header.basic.cap == cap)
+ return offset;
+
+ offset = header.basic.next;
}
- /*
- * "Hey, we could terminate some capability lists with a null offset
- * and others with a pointer to the last element." - "Great idea!"
- */
- if (next == offset)
- return 0;
- return next;
+
+ return -ENOENT;
}
/**
- * tb_find_cap() - find a capability
+ * tb_switch_find_vse_cap() - Find switch vendor specific capability
+ * @sw: Switch to find the capability for
+ * @vsec: Vendor specific capability to look for
*
- * Return: Returns a positive offset if the capability was found and 0 if not.
- * Returns an error code on failure.
+ * The function enumerates the vendor specific capabilities (VSEC) of a
+ * switch and returns the offset when a capability matching @vsec is
+ * found. If no such capability is found, %-ENOENT is returned. In case
+ * of an error a negative errno is returned.
*/
-int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
{
- u32 offset = 1;
struct tb_cap_any header;
- int res;
- int retries = 10;
- while (retries--) {
- res = tb_port_read(port, &header, space, offset, 1);
- if (res) {
- /* Intel needs some help with linked lists. */
- if (space == TB_CFG_PORT && offset == 0xa
- && port->config.type == TB_TYPE_DP_HDMI_OUT) {
- offset = 0x39;
- continue;
- }
- return res;
- }
- if (offset != 1) {
- if (tb_cap(&header) == cap)
+ int offset;
+
+ offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE);
+ if (offset < 0)
+ return offset;
+
+ while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) {
+ int ret;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+ if (ret)
+ return ret;
+
+ /*
+ * Extended vendor specific capabilities come in two
+ * flavors: short and long. The latter is used when
+ * offset is over 0xff.
+ */
+ if (offset >= CAP_OFFSET_MAX) {
+ if (header.extended_long.vsec_id == vsec)
return offset;
- if (tb_cap_is_long(&header)) {
- /* tb_cap_extended_long is 2 dwords */
- res = tb_port_read(port, &header, space,
- offset, 2);
- if (res)
- return res;
- }
+ offset = header.extended_long.next;
+ } else {
+ if (header.extended_short.vsec_id == vsec)
+ return offset;
+ if (!header.extended_short.length)
+ return -ENOENT;
+ offset = header.extended_short.next;
}
- offset = tb_cap_next(&header, offset);
- if (!offset)
- return 0;
}
- tb_port_WARN(port,
- "run out of retries while looking for cap %#x in config space %d, last offset: %#x\n",
- cap, space, offset);
- return -EIO;
+
+ return -ENOENT;
}
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 1146ff4210a9..69c0232a22f8 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -5,22 +5,17 @@
*/
#include <linux/crc32.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
-#include <linux/kfifo.h>
#include "ctl.h"
-struct ctl_pkg {
- struct tb_ctl *ctl;
- void *buffer;
- struct ring_frame frame;
-};
-
-#define TB_CTL_RX_PKG_COUNT 10
+#define TB_CTL_RX_PKG_COUNT 10
+#define TB_CTL_RETRIES 4
/**
* struct tb_cfg - thunderbolt control channel
@@ -32,10 +27,11 @@ struct tb_ctl {
struct dma_pool *frame_pool;
struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
- DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16);
- struct completion response_ready;
+ struct mutex request_queue_lock;
+ struct list_head request_queue;
+ bool running;
- hotplug_cb callback;
+ event_cb callback;
void *callback_data;
};
@@ -52,102 +48,124 @@ struct tb_ctl {
#define tb_ctl_info(ctl, format, arg...) \
dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
+#define tb_ctl_dbg(ctl, format, arg...) \
+ dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
-/* configuration packets definitions */
+static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
+/* Serializes access to request kref_get/put */
+static DEFINE_MUTEX(tb_cfg_request_lock);
-enum tb_cfg_pkg_type {
- TB_CFG_PKG_READ = 1,
- TB_CFG_PKG_WRITE = 2,
- TB_CFG_PKG_ERROR = 3,
- TB_CFG_PKG_NOTIFY_ACK = 4,
- TB_CFG_PKG_EVENT = 5,
- TB_CFG_PKG_XDOMAIN_REQ = 6,
- TB_CFG_PKG_XDOMAIN_RESP = 7,
- TB_CFG_PKG_OVERRIDE = 8,
- TB_CFG_PKG_RESET = 9,
- TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
-};
+/**
+ * tb_cfg_request_alloc() - Allocates a new config request
+ *
+ * This is a refcounted object, so when you are done with it, call
+ * tb_cfg_request_put() on it.
+ */
+struct tb_cfg_request *tb_cfg_request_alloc(void)
+{
+ struct tb_cfg_request *req;
-/* common header */
-struct tb_cfg_header {
- u32 route_hi:22;
- u32 unknown:10; /* highest order bit is set on replies */
- u32 route_lo;
-} __packed;
-
-/* additional header for read/write packets */
-struct tb_cfg_address {
- u32 offset:13; /* in dwords */
- u32 length:6; /* in dwords */
- u32 port:6;
- enum tb_cfg_space space:2;
- u32 seq:2; /* sequence number */
- u32 zero:3;
-} __packed;
-
-/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
-struct cfg_read_pkg {
- struct tb_cfg_header header;
- struct tb_cfg_address addr;
-} __packed;
-
-/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
-struct cfg_write_pkg {
- struct tb_cfg_header header;
- struct tb_cfg_address addr;
- u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
-} __packed;
-
-/* TB_CFG_PKG_ERROR */
-struct cfg_error_pkg {
- struct tb_cfg_header header;
- enum tb_cfg_error error:4;
- u32 zero1:4;
- u32 port:6;
- u32 zero2:2; /* Both should be zero, still they are different fields. */
- u32 zero3:16;
-} __packed;
-
-/* TB_CFG_PKG_EVENT */
-struct cfg_event_pkg {
- struct tb_cfg_header header;
- u32 port:6;
- u32 zero:25;
- bool unplug:1;
-} __packed;
-
-/* TB_CFG_PKG_RESET */
-struct cfg_reset_pkg {
- struct tb_cfg_header header;
-} __packed;
-
-/* TB_CFG_PKG_PREPARE_TO_SLEEP */
-struct cfg_pts_pkg {
- struct tb_cfg_header header;
- u32 data;
-} __packed;
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+ kref_init(&req->kref);
-/* utility functions */
+ return req;
+}
-static u64 get_route(struct tb_cfg_header header)
+/**
+ * tb_cfg_request_get() - Increase refcount of a request
+ * @req: Request whose refcount is increased
+ */
+void tb_cfg_request_get(struct tb_cfg_request *req)
{
- return (u64) header.route_hi << 32 | header.route_lo;
+ mutex_lock(&tb_cfg_request_lock);
+ kref_get(&req->kref);
+ mutex_unlock(&tb_cfg_request_lock);
}
-static struct tb_cfg_header make_header(u64 route)
+static void tb_cfg_request_destroy(struct kref *kref)
{
- struct tb_cfg_header header = {
- .route_hi = route >> 32,
- .route_lo = route,
- };
- /* check for overflow, route_hi is not 32 bits! */
- WARN_ON(get_route(header) != route);
- return header;
+ struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
+
+ kfree(req);
+}
+
+/**
+ * tb_cfg_request_put() - Decrease refcount and possibly release the request
+ * @req: Request whose refcount is decreased
+ *
+ * Call this function when you are done with the request. When refcount
+ * goes to %0 the object is released.
+ */
+void tb_cfg_request_put(struct tb_cfg_request *req)
+{
+ mutex_lock(&tb_cfg_request_lock);
+ kref_put(&req->kref, tb_cfg_request_destroy);
+ mutex_unlock(&tb_cfg_request_lock);
}
-static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
- u64 route)
+static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
+ struct tb_cfg_request *req)
+{
+ WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
+ WARN_ON(req->ctl);
+
+ mutex_lock(&ctl->request_queue_lock);
+ if (!ctl->running) {
+ mutex_unlock(&ctl->request_queue_lock);
+ return -ENOTCONN;
+ }
+ req->ctl = ctl;
+ list_add_tail(&req->list, &ctl->request_queue);
+ set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+ mutex_unlock(&ctl->request_queue_lock);
+ return 0;
+}
+
+static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
+{
+ struct tb_ctl *ctl = req->ctl;
+
+ mutex_lock(&ctl->request_queue_lock);
+ list_del(&req->list);
+ clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+ if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+ wake_up(&tb_cfg_request_cancel_queue);
+ mutex_unlock(&ctl->request_queue_lock);
+}
+
+static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
+{
+ return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+}
+
+static struct tb_cfg_request *
+tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
+{
+ struct tb_cfg_request *req;
+ bool found = false;
+
+ mutex_lock(&pkg->ctl->request_queue_lock);
+ list_for_each_entry(req, &pkg->ctl->request_queue, list) {
+ tb_cfg_request_get(req);
+ if (req->match(req, pkg)) {
+ found = true;
+ break;
+ }
+ tb_cfg_request_put(req);
+ }
+ mutex_unlock(&pkg->ctl->request_queue_lock);
+
+ return found ? req : NULL;
+}
+
+/* utility functions */
+
+
+static int check_header(const struct ctl_pkg *pkg, u32 len,
+ enum tb_cfg_pkg_type type, u64 route)
{
struct tb_cfg_header *header = pkg->buffer;
@@ -167,9 +185,9 @@ static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
if (WARN(header->unknown != 1 << 9,
"header->unknown is %#x\n", header->unknown))
return -EIO;
- if (WARN(route != get_route(*header),
+ if (WARN(route != tb_cfg_get_route(header),
"wrong route (expected %llx, got %llx)",
- route, get_route(*header)))
+ route, tb_cfg_get_route(header)))
return -EIO;
return 0;
}
@@ -189,8 +207,6 @@ static int check_config_address(struct tb_cfg_address addr,
if (WARN(length != addr.length, "wrong space (expected %x, got %x\n)",
length, addr.length))
return -EIO;
- if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
- return -EIO;
/*
* We cannot check addr->port as it is set to the upstream port of the
* sender.
@@ -198,14 +214,14 @@ static int check_config_address(struct tb_cfg_address addr,
return 0;
}
-static struct tb_cfg_result decode_error(struct ctl_pkg *response)
+static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
struct tb_cfg_result res = { 0 };
- res.response_route = get_route(pkg->header);
+ res.response_route = tb_cfg_get_route(&pkg->header);
res.response_port = 0;
res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
- get_route(pkg->header));
+ tb_cfg_get_route(&pkg->header));
if (res.err)
return res;
@@ -219,7 +235,7 @@ static struct tb_cfg_result decode_error(struct ctl_pkg *response)
}
-static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
+static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
enum tb_cfg_pkg_type type, u64 route)
{
struct tb_cfg_header *header = pkg->buffer;
@@ -229,7 +245,7 @@ static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
return decode_error(pkg);
res.response_port = 0; /* will be updated later for cfg_read/write */
- res.response_route = get_route(*header);
+ res.response_route = tb_cfg_get_route(header);
res.err = check_header(pkg, len, type, route);
return res;
}
@@ -273,7 +289,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
}
}
-static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
+static void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
int i;
for (i = 0; i < len; i++)
@@ -287,7 +303,7 @@ static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
dst[i] = be32_to_cpu(src[i]);
}
-static __be32 tb_crc(void *data, size_t len)
+static __be32 tb_crc(const void *data, size_t len)
{
return cpu_to_be32(~__crc32c_le(~0, data, len));
}
@@ -333,7 +349,7 @@ static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
*
* Return: Returns 0 on success or an error code on failure.
*/
-static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
+static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
enum tb_cfg_pkg_type type)
{
int res;
@@ -364,24 +380,12 @@ static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
}
/**
- * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback
+ * tb_ctl_handle_event() - pass an event packet to the ctl->callback
*/
-static void tb_ctl_handle_plug_event(struct tb_ctl *ctl,
- struct ctl_pkg *response)
+static void tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
+ struct ctl_pkg *pkg, size_t size)
{
- struct cfg_event_pkg *pkg = response->buffer;
- u64 route = get_route(pkg->header);
-
- if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) {
- tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n");
- return;
- }
-
- if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT))
- tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n",
- route, pkg->port);
- WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero);
- ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug);
+ ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
@@ -394,10 +398,30 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
*/
}
+static int tb_async_error(const struct ctl_pkg *pkg)
+{
+	const struct cfg_error_pkg *error = pkg->buffer;
+
+ if (pkg->frame.eof != TB_CFG_PKG_ERROR)
+ return false;
+
+ switch (error->error) {
+ case TB_CFG_ERROR_LINK_ERROR:
+ case TB_CFG_ERROR_HEC_ERROR_DETECTED:
+ case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+ struct tb_cfg_request *req;
+ __be32 crc32;
if (canceled)
return; /*
@@ -412,55 +436,168 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
}
frame->size -= 4; /* remove checksum */
- if (*(__be32 *) (pkg->buffer + frame->size)
- != tb_crc(pkg->buffer, frame->size)) {
- tb_ctl_err(pkg->ctl,
- "RX: checksum mismatch, dropping packet\n");
- goto rx;
- }
+ crc32 = tb_crc(pkg->buffer, frame->size);
be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
- if (frame->eof == TB_CFG_PKG_EVENT) {
- tb_ctl_handle_plug_event(pkg->ctl, pkg);
+ switch (frame->eof) {
+ case TB_CFG_PKG_READ:
+ case TB_CFG_PKG_WRITE:
+ case TB_CFG_PKG_ERROR:
+ case TB_CFG_PKG_OVERRIDE:
+ case TB_CFG_PKG_RESET:
+ if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+ tb_ctl_err(pkg->ctl,
+ "RX: checksum mismatch, dropping packet\n");
+ goto rx;
+ }
+ if (tb_async_error(pkg)) {
+ tb_ctl_handle_event(pkg->ctl, frame->eof,
+ pkg, frame->size);
+ goto rx;
+ }
+ break;
+
+ case TB_CFG_PKG_EVENT:
+ if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+ tb_ctl_err(pkg->ctl,
+ "RX: checksum mismatch, dropping packet\n");
+ goto rx;
+ }
+ /* Fall through */
+ case TB_CFG_PKG_ICM_EVENT:
+ tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
goto rx;
+
+ default:
+ break;
}
- if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) {
- tb_ctl_err(pkg->ctl, "RX: fifo is full\n");
- goto rx;
+
+ /*
+ * The received packet will be processed only if there is an
+	 * active request and the packet is what is expected. This
+	 * prevents packets such as replies arriving after the timeout
+	 * has triggered from messing with the active requests.
+ */
+ req = tb_cfg_request_find(pkg->ctl, pkg);
+ if (req) {
+ if (req->copy(req, pkg))
+ schedule_work(&req->work);
+ tb_cfg_request_put(req);
}
- complete(&pkg->ctl->response_ready);
- return;
+
rx:
tb_ctl_rx_submit(pkg);
}
+static void tb_cfg_request_work(struct work_struct *work)
+{
+ struct tb_cfg_request *req = container_of(work, typeof(*req), work);
+
+ if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+ req->callback(req->callback_data);
+
+ tb_cfg_request_dequeue(req);
+ tb_cfg_request_put(req);
+}
+
/**
- * tb_ctl_rx() - receive a packet from the control channel
+ * tb_cfg_request() - Start a control request without waiting for it to complete
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @callback: Callback called when the request is completed
+ * @callback_data: Data to be passed to @callback
+ *
+ * This queues @req on the given control channel without waiting for it
+ * to complete. When the request completes, @callback is called.
*/
-static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
- size_t length, int timeout_msec,
- u64 route, enum tb_cfg_pkg_type type)
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+ void (*callback)(void *), void *callback_data)
{
- struct tb_cfg_result res;
- struct ctl_pkg *pkg;
+ int ret;
- if (!wait_for_completion_timeout(&ctl->response_ready,
- msecs_to_jiffies(timeout_msec))) {
- tb_ctl_WARN(ctl, "RX: timeout\n");
- return (struct tb_cfg_result) { .err = -ETIMEDOUT };
- }
- if (!kfifo_get(&ctl->response_fifo, &pkg)) {
- tb_ctl_WARN(ctl, "empty kfifo\n");
- return (struct tb_cfg_result) { .err = -EIO };
- }
+ req->flags = 0;
+ req->callback = callback;
+ req->callback_data = callback_data;
+ INIT_WORK(&req->work, tb_cfg_request_work);
+ INIT_LIST_HEAD(&req->list);
- res = parse_header(pkg, length, type, route);
- if (!res.err)
- memcpy(buffer, pkg->buffer, length);
- tb_ctl_rx_submit(pkg);
- return res;
+ tb_cfg_request_get(req);
+ ret = tb_cfg_request_enqueue(ctl, req);
+ if (ret)
+ goto err_put;
+
+ ret = tb_ctl_tx(ctl, req->request, req->request_size,
+ req->request_type);
+ if (ret)
+ goto err_dequeue;
+
+ if (!req->response)
+ schedule_work(&req->work);
+
+ return 0;
+
+err_dequeue:
+ tb_cfg_request_dequeue(req);
+err_put:
+ tb_cfg_request_put(req);
+
+ return ret;
+}
+
+/**
+ * tb_cfg_request_cancel() - Cancel a control request
+ * @req: Request to cancel
+ * @err: Error to assign to the request
+ *
+ * This function can be used to cancel an ongoing request. It will wait
+ * until the request is no longer active.
+ */
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
+{
+ set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
+ schedule_work(&req->work);
+ wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
+ req->result.err = err;
+}
+
+static void tb_cfg_request_complete(void *data)
+{
+ complete(data);
}
+/**
+ * tb_cfg_request_sync() - Start control request and wait until it completes
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @timeout_msec: Timeout in milliseconds to wait for @req to complete
+ *
+ * Starts a control request and waits until it completes. If the timeout
+ * triggers, the request is canceled before the function returns. Note
+ * the caller needs to make sure only one message for a given switch is
+ * active at a time.
+ */
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+ struct tb_cfg_request *req,
+ int timeout_msec)
+{
+ unsigned long timeout = msecs_to_jiffies(timeout_msec);
+ struct tb_cfg_result res = { 0 };
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret;
+
+ ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
+ if (ret) {
+ res.err = ret;
+ return res;
+ }
+
+ if (!wait_for_completion_timeout(&done, timeout))
+ tb_cfg_request_cancel(req, -ETIMEDOUT);
+
+ flush_work(&req->work);
+
+ return req->result;
+}
/* public interface, alloc/start/stop/free */
@@ -471,7 +608,7 @@ static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
*
* Return: Returns a pointer on success or NULL on failure.
*/
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
@@ -481,18 +618,18 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
ctl->callback = cb;
ctl->callback_data = cb_data;
- init_completion(&ctl->response_ready);
- INIT_KFIFO(ctl->response_fifo);
+ mutex_init(&ctl->request_queue_lock);
+ INIT_LIST_HEAD(&ctl->request_queue);
ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
TB_FRAME_SIZE, 4, 0);
if (!ctl->frame_pool)
goto err;
- ctl->tx = ring_alloc_tx(nhi, 0, 10);
+ ctl->tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
if (!ctl->tx)
goto err;
- ctl->rx = ring_alloc_rx(nhi, 0, 10);
+ ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
if (!ctl->rx)
goto err;
@@ -520,6 +657,10 @@ err:
void tb_ctl_free(struct tb_ctl *ctl)
{
int i;
+
+ if (!ctl)
+ return;
+
if (ctl->rx)
ring_free(ctl->rx);
if (ctl->tx)
@@ -546,6 +687,8 @@ void tb_ctl_start(struct tb_ctl *ctl)
ring_start(ctl->rx);
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
tb_ctl_rx_submit(ctl->rx_packets[i]);
+
+ ctl->running = true;
}
/**
@@ -558,12 +701,16 @@ void tb_ctl_start(struct tb_ctl *ctl)
*/
void tb_ctl_stop(struct tb_ctl *ctl)
{
+ mutex_lock(&ctl->request_queue_lock);
+ ctl->running = false;
+ mutex_unlock(&ctl->request_queue_lock);
+
ring_stop(ctl->rx);
ring_stop(ctl->tx);
- if (!kfifo_is_empty(&ctl->response_fifo))
- tb_ctl_WARN(ctl, "dangling response in response_fifo\n");
- kfifo_reset(&ctl->response_fifo);
+ if (!list_empty(&ctl->request_queue))
+ tb_ctl_WARN(ctl, "dangling request in request_queue\n");
+ INIT_LIST_HEAD(&ctl->request_queue);
tb_ctl_info(ctl, "control channel stopped\n");
}
@@ -578,7 +725,7 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
enum tb_cfg_error error)
{
struct cfg_error_pkg pkg = {
- .header = make_header(route),
+ .header = tb_cfg_make_header(route),
.port = port,
.error = error,
};
@@ -586,6 +733,49 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
+static bool tb_cfg_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+ if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+ return true;
+
+ if (pkg->frame.eof != req->response_type)
+ return false;
+ if (route != tb_cfg_get_route(req->request))
+ return false;
+ if (pkg->frame.size != req->response_size)
+ return false;
+
+ if (pkg->frame.eof == TB_CFG_PKG_READ ||
+ pkg->frame.eof == TB_CFG_PKG_WRITE) {
+ const struct cfg_read_pkg *req_hdr = req->request;
+ const struct cfg_read_pkg *res_hdr = pkg->buffer;
+
+ if (req_hdr->addr.seq != res_hdr->addr.seq)
+ return false;
+ }
+
+ return true;
+}
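Concretely, the addr.seq comparison above is what makes the retry loops in
tb_cfg_read_raw()/tb_cfg_write_raw() below safe: if the reply to the seq 0
attempt arrives only after the timeout fired and the request was resent with
seq 1, the stale reply fails the seq match, tb_cfg_request_find() returns
NULL, and the packet is dropped instead of completing the new request with
old data.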
+
+static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+ struct tb_cfg_result res;
+
+ /* Now make sure it is in expected format */
+ res = parse_header(pkg, req->response_size, req->response_type,
+ tb_cfg_get_route(req->request));
+ if (!res.err)
+ memcpy(req->response, pkg->buffer, req->response_size);
+
+ req->result = res;
+
+ /* Always complete when first response is received */
+ return true;
+}
+
/**
* tb_cfg_reset() - send a reset packet and wait for a response
*
@@ -596,16 +786,31 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec)
{
- int err;
- struct cfg_reset_pkg request = { .header = make_header(route) };
+ struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
+ struct tb_cfg_result res = { 0 };
struct tb_cfg_header reply;
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req) {
+ res.err = -ENOMEM;
+ return res;
+ }
+
+ req->match = tb_cfg_match;
+ req->copy = tb_cfg_copy;
+ req->request = &request;
+ req->request_size = sizeof(request);
+ req->request_type = TB_CFG_PKG_RESET;
+ req->response = &reply;
+ req->response_size = sizeof(reply);
+	req->response_type = TB_CFG_PKG_RESET;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
- err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET);
- if (err)
- return (struct tb_cfg_result) { .err = err };
+ tb_cfg_request_put(req);
- return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
- TB_CFG_PKG_RESET);
+ return res;
}
/**
@@ -619,7 +824,7 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
{
struct tb_cfg_result res = { 0 };
struct cfg_read_pkg request = {
- .header = make_header(route),
+ .header = tb_cfg_make_header(route),
.addr = {
.port = port,
.space = space,
@@ -628,13 +833,39 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
},
};
struct cfg_write_pkg reply;
+ int retries = 0;
- res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ);
- if (res.err)
- return res;
+ while (retries < TB_CTL_RETRIES) {
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req) {
+ res.err = -ENOMEM;
+ return res;
+ }
+
+ request.addr.seq = retries++;
+
+ req->match = tb_cfg_match;
+ req->copy = tb_cfg_copy;
+ req->request = &request;
+ req->request_size = sizeof(request);
+ req->request_type = TB_CFG_PKG_READ;
+ req->response = &reply;
+ req->response_size = 12 + 4 * length;
+ req->response_type = TB_CFG_PKG_READ;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ if (res.err != -ETIMEDOUT)
+ break;
+
+ /* Wait a bit (arbitrary time) until we send a retry */
+ usleep_range(10, 100);
+ }
- res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
- TB_CFG_PKG_READ);
if (res.err)
return res;
@@ -650,13 +881,13 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
*
* Offset and length are in dwords.
*/
-struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
u32 offset, u32 length, int timeout_msec)
{
struct tb_cfg_result res = { 0 };
struct cfg_write_pkg request = {
- .header = make_header(route),
+ .header = tb_cfg_make_header(route),
.addr = {
.port = port,
.space = space,
@@ -665,15 +896,41 @@ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
},
};
struct cfg_read_pkg reply;
+ int retries = 0;
memcpy(&request.data, buffer, length * 4);
- res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE);
- if (res.err)
- return res;
+ while (retries < TB_CTL_RETRIES) {
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req) {
+ res.err = -ENOMEM;
+ return res;
+ }
+
+ request.addr.seq = retries++;
+
+ req->match = tb_cfg_match;
+ req->copy = tb_cfg_copy;
+ req->request = &request;
+ req->request_size = 12 + 4 * length;
+ req->request_type = TB_CFG_PKG_WRITE;
+ req->response = &reply;
+ req->response_size = sizeof(reply);
+ req->response_type = TB_CFG_PKG_WRITE;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ if (res.err != -ETIMEDOUT)
+ break;
+
+ /* Wait a bit (arbitrary time) until we send a retry */
+ usleep_range(10, 100);
+ }
- res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
- TB_CFG_PKG_WRITE);
if (res.err)
return res;
@@ -687,24 +944,52 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
{
struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
- if (res.err == 1) {
+ switch (res.err) {
+ case 0:
+ /* Success */
+ break;
+
+ case 1:
+ /* Thunderbolt error, tb_error holds the actual number */
tb_cfg_print_error(ctl, &res);
return -EIO;
+
+ case -ETIMEDOUT:
+ tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
+ space, offset);
+ break;
+
+ default:
+ WARN(1, "tb_cfg_read: %d\n", res.err);
+ break;
}
- WARN(res.err, "tb_cfg_read: %d\n", res.err);
return res.err;
}
-int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
- if (res.err == 1) {
+ switch (res.err) {
+ case 0:
+ /* Success */
+ break;
+
+ case 1:
+ /* Thunderbolt error, tb_error holds the actual number */
tb_cfg_print_error(ctl, &res);
return -EIO;
+
+ case -ETIMEDOUT:
+ tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
+ space, offset);
+ break;
+
+ default:
+ WARN(1, "tb_cfg_write: %d\n", res.err);
+ break;
}
- WARN(res.err, "tb_cfg_write: %d\n", res.err);
return res.err;
}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index ba87d6e731dd..36fd28b1c1c5 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -7,14 +7,18 @@
#ifndef _TB_CFG
#define _TB_CFG
+#include <linux/kref.h>
+
#include "nhi.h"
+#include "tb_msgs.h"
/* control channel */
struct tb_ctl;
-typedef void (*hotplug_cb)(void *data, u64 route, u8 port, bool unplug);
+typedef void (*event_cb)(void *data, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size);
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
@@ -23,21 +27,6 @@ void tb_ctl_free(struct tb_ctl *ctl);
#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
-enum tb_cfg_space {
- TB_CFG_HOPS = 0,
- TB_CFG_PORT = 1,
- TB_CFG_SWITCH = 2,
- TB_CFG_COUNTERS = 3,
-};
-
-enum tb_cfg_error {
- TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
- TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
- TB_CFG_ERROR_NO_SUCH_PORT = 4,
- TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
- TB_CFG_ERROR_LOOP = 8,
-};
-
struct tb_cfg_result {
u64 response_route;
u32 response_port; /*
@@ -52,6 +41,84 @@ struct tb_cfg_result {
enum tb_cfg_error tb_error; /* valid if err == 1 */
};
+struct ctl_pkg {
+ struct tb_ctl *ctl;
+ void *buffer;
+ struct ring_frame frame;
+};
+
+/**
+ * struct tb_cfg_request - Control channel request
+ * @kref: Reference count
+ * @ctl: Pointer to the control channel structure. Only set when the
+ * request is queued.
+ * @request_size: Size of the request packet (in bytes)
+ * @request_type: Type of the request packet
+ * @response: Response is stored here
+ * @response_size: Maximum size of one response packet
+ * @response_type: Expected type of the response packet
+ * @npackets: Number of packets expected to be returned with this request
+ * @match: Function used to match the incoming packet
+ * @copy: Function used to copy the incoming packet to @response
+ * @callback: Callback called when the request is finished successfully
+ * @callback_data: Data to be passed to @callback
+ * @flags: Flags for the request
+ * @work: Work item used to complete the request
+ * @result: Result after the request has been completed
+ * @list: Requests are queued using this field
+ *
+ * An arbitrary request over the Thunderbolt control channel. For
+ * standard control channel messages, one should use tb_cfg_read/write()
+ * and friends if possible.
+ */
+struct tb_cfg_request {
+ struct kref kref;
+ struct tb_ctl *ctl;
+ const void *request;
+ size_t request_size;
+ enum tb_cfg_pkg_type request_type;
+ void *response;
+ size_t response_size;
+ enum tb_cfg_pkg_type response_type;
+ size_t npackets;
+ bool (*match)(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg);
+ bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg);
+ void (*callback)(void *callback_data);
+ void *callback_data;
+ unsigned long flags;
+ struct work_struct work;
+ struct tb_cfg_result result;
+ struct list_head list;
+};
+
+#define TB_CFG_REQUEST_ACTIVE 0
+#define TB_CFG_REQUEST_CANCELED 1
+
+struct tb_cfg_request *tb_cfg_request_alloc(void);
+void tb_cfg_request_get(struct tb_cfg_request *req);
+void tb_cfg_request_put(struct tb_cfg_request *req);
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+ void (*callback)(void *), void *callback_data);
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err);
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+ struct tb_cfg_request *req, int timeout_msec);
+
+static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header)
+{
+ return (u64) header->route_hi << 32 | header->route_lo;
+}
+
+static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
+{
+ struct tb_cfg_header header = {
+ .route_hi = route >> 32,
+ .route_lo = route,
+ };
+ /* check for overflow, route_hi is not 32 bits! */
+ WARN_ON(tb_cfg_get_route(&header) != route);
+ return header;
+}
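Worked example of the encoding above: route 0x0000000300000102 is stored as
route_hi = 0x3 (bits 32-53 fit the 22-bit field) and route_lo = 0x00000102,
and tb_cfg_get_route() reassembles 0x300000102. A route with any of bits
54-63 set cannot be represented, which is exactly what the WARN_ON() in
tb_cfg_make_header() catches.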
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
enum tb_cfg_error error);
@@ -61,13 +128,13 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port,
enum tb_cfg_space space, u32 offset,
u32 length, int timeout_msec);
-struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
u64 route, u32 port,
enum tb_cfg_space space, u32 offset,
u32 length, int timeout_msec);
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
-int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
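Putting the header together, every synchronous transaction follows the same
alloc/fill/sync/put pattern. A minimal sketch modeled on tb_cfg_reset() in
ctl.c (the match/copy callbacks are the static helpers defined there; no new
API is introduced):

	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_header reply;
	struct tb_cfg_result res;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();		/* refcount starts at 1 */
	if (!req)
		return -ENOMEM;

	req->match = tb_cfg_match;		/* pairs an incoming packet with us */
	req->copy = tb_cfg_copy;		/* copies the payload to @response */
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, TB_CFG_DEFAULT_TIMEOUT);
	tb_cfg_request_put(req);		/* drop our reference */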
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
new file mode 100644
index 000000000000..af6dde347bee
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.c
@@ -0,0 +1,524 @@
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dma_port.h"
+#include "tb_regs.h"
+
+#define DMA_PORT_CAP 0x3e
+
+#define MAIL_DATA 1
+#define MAIL_DATA_DWORDS 16
+
+#define MAIL_IN 17
+#define MAIL_IN_CMD_SHIFT 28
+#define MAIL_IN_CMD_MASK GENMASK(31, 28)
+#define MAIL_IN_CMD_FLASH_WRITE 0x0
+#define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1
+#define MAIL_IN_CMD_FLASH_READ 0x2
+#define MAIL_IN_CMD_POWER_CYCLE 0x4
+#define MAIL_IN_DWORDS_SHIFT 24
+#define MAIL_IN_DWORDS_MASK GENMASK(27, 24)
+#define MAIL_IN_ADDRESS_SHIFT 2
+#define MAIL_IN_ADDRESS_MASK GENMASK(23, 2)
+#define MAIL_IN_CSS BIT(1)
+#define MAIL_IN_OP_REQUEST BIT(0)
+
+#define MAIL_OUT 18
+#define MAIL_OUT_STATUS_RESPONSE BIT(29)
+#define MAIL_OUT_STATUS_CMD_SHIFT 4
+#define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4)
+#define MAIL_OUT_STATUS_MASK GENMASK(3, 0)
+#define MAIL_OUT_STATUS_COMPLETED 0
+#define MAIL_OUT_STATUS_ERR_AUTH 1
+#define MAIL_OUT_STATUS_ERR_ACCESS 2
+
+#define DMA_PORT_TIMEOUT 5000 /* ms */
+#define DMA_PORT_RETRIES 3
+
+/**
+ * struct tb_dma_port - DMA control port
+ * @sw: Switch the DMA port belongs to
+ * @port: Switch port number where DMA capability is found
+ * @base: Start offset of the mailbox registers
+ * @buf: Temporary buffer to store a single block
+ */
+struct tb_dma_port {
+ struct tb_switch *sw;
+ u8 port;
+ u32 base;
+ u8 *buf;
+};
+
+/*
+ * When the switch is in safe mode it supports very little functionality
+ * so we don't validate that much here.
+ */
+static bool dma_port_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+ if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+ return true;
+ if (pkg->frame.eof != req->response_type)
+ return false;
+ if (route != tb_cfg_get_route(req->request))
+ return false;
+ if (pkg->frame.size != req->response_size)
+ return false;
+
+ return true;
+}
+
+static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+ memcpy(req->response, pkg->buffer, req->response_size);
+ return true;
+}
+
+static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
+ u32 port, u32 offset, u32 length, int timeout_msec)
+{
+ struct cfg_read_pkg request = {
+ .header = tb_cfg_make_header(route),
+ .addr = {
+ .seq = 1,
+ .port = port,
+ .space = TB_CFG_PORT,
+ .offset = offset,
+ .length = length,
+ },
+ };
+ struct tb_cfg_request *req;
+ struct cfg_write_pkg reply;
+ struct tb_cfg_result res;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = dma_port_match;
+ req->copy = dma_port_copy;
+ req->request = &request;
+ req->request_size = sizeof(request);
+ req->request_type = TB_CFG_PKG_READ;
+ req->response = &reply;
+ req->response_size = 12 + 4 * length;
+ req->response_type = TB_CFG_PKG_READ;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ if (res.err)
+ return res.err;
+
+ memcpy(buffer, &reply.data, 4 * length);
+ return 0;
+}
+
+static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
+ u32 port, u32 offset, u32 length, int timeout_msec)
+{
+ struct cfg_write_pkg request = {
+ .header = tb_cfg_make_header(route),
+ .addr = {
+ .seq = 1,
+ .port = port,
+ .space = TB_CFG_PORT,
+ .offset = offset,
+ .length = length,
+ },
+ };
+ struct tb_cfg_request *req;
+ struct cfg_read_pkg reply;
+ struct tb_cfg_result res;
+
+ memcpy(&request.data, buffer, length * 4);
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = dma_port_match;
+ req->copy = dma_port_copy;
+ req->request = &request;
+ req->request_size = 12 + 4 * length;
+ req->request_type = TB_CFG_PKG_WRITE;
+ req->response = &reply;
+ req->response_size = sizeof(reply);
+ req->response_type = TB_CFG_PKG_WRITE;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ return res.err;
+}
+
+static int dma_find_port(struct tb_switch *sw)
+{
+ int port, ret;
+ u32 type;
+
+ /*
+ * The DMA (NHI) port is either 3 or 5 depending on the
+ * controller. Try both starting from 5 which is more common.
+ */
+ port = 5;
+ ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
+ DMA_PORT_TIMEOUT);
+ if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
+ return port;
+
+ port = 3;
+ ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
+ DMA_PORT_TIMEOUT);
+ if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
+ return port;
+
+ return -ENODEV;
+}
+
+/**
+ * dma_port_alloc() - Finds the DMA control port of a switch
+ * @sw: Switch to find the DMA port from
+ *
+ * Function checks if the switch NHI port supports the DMA configuration
+ * based mailbox capability and if it does, allocates and initializes
+ * the DMA port structure. Returns %NULL if the capability was not found.
+ *
+ * The DMA control port is functional even when the switch is in safe
+ * mode.
+ */
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
+{
+ struct tb_dma_port *dma;
+ int port;
+
+ port = dma_find_port(sw);
+ if (port < 0)
+ return NULL;
+
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
+ if (!dma->buf) {
+ kfree(dma);
+ return NULL;
+ }
+
+ dma->sw = sw;
+ dma->port = port;
+ dma->base = DMA_PORT_CAP;
+
+ return dma;
+}
+
+/**
+ * dma_port_free() - Release DMA control port structure
+ * @dma: DMA control port
+ */
+void dma_port_free(struct tb_dma_port *dma)
+{
+ if (dma) {
+ kfree(dma->buf);
+ kfree(dma);
+ }
+}
+
+static int dma_port_wait_for_completion(struct tb_dma_port *dma,
+ unsigned int timeout)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(timeout);
+ struct tb_switch *sw = dma->sw;
+
+ do {
+ int ret;
+ u32 in;
+
+ ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
+ dma->base + MAIL_IN, 1, 50);
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ return ret;
+ } else if (!(in & MAIL_IN_OP_REQUEST)) {
+ return 0;
+ }
+
+ usleep_range(50, 100);
+ } while (time_before(jiffies, end));
+
+ return -ETIMEDOUT;
+}
+
+static int status_to_errno(u32 status)
+{
+ switch (status & MAIL_OUT_STATUS_MASK) {
+ case MAIL_OUT_STATUS_COMPLETED:
+ return 0;
+ case MAIL_OUT_STATUS_ERR_AUTH:
+ return -EINVAL;
+ case MAIL_OUT_STATUS_ERR_ACCESS:
+ return -EACCES;
+ }
+
+ return -EIO;
+}
+
+static int dma_port_request(struct tb_dma_port *dma, u32 in,
+ unsigned int timeout)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 out;
+ int ret;
+
+ ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
+ dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = dma_port_wait_for_completion(dma, timeout);
+ if (ret)
+ return ret;
+
+ ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+ dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return status_to_errno(out);
+}
+
+static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
+ void *buf, u32 size)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 in, dwaddress, dwords;
+ int ret;
+
+ dwaddress = address / 4;
+ dwords = size / 4;
+
+ in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
+ if (dwords < MAIL_DATA_DWORDS)
+ in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+ in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+ in |= MAIL_IN_OP_REQUEST;
+
+ ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
+ dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+}
+
+static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
+ const void *buf, u32 size)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 in, dwaddress, dwords;
+ int ret;
+
+ dwords = size / 4;
+
+ /* Write the block to MAIL_DATA registers */
+	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
+			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+	if (ret)
+		return ret;
+
+ in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
+
+ /* CSS header write is always done to the same magic address */
+ if (address >= DMA_PORT_CSS_ADDRESS) {
+ dwaddress = DMA_PORT_CSS_ADDRESS;
+ in |= MAIL_IN_CSS;
+ } else {
+ dwaddress = address / 4;
+ }
+
+ in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+ in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+ in |= MAIL_IN_OP_REQUEST;
+
+ return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+}
+
+/**
+ * dma_port_flash_read() - Read from active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of active region
+ * @buf: Buffer where the data is read
+ * @size: Size of the buffer
+ */
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+ void *buf, size_t size)
+{
+ unsigned int retries = DMA_PORT_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+ int ret;
+
+ ret = dma_port_flash_read_block(dma, address, dma->buf,
+ ALIGN(nbytes, 4));
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ memcpy(buf, dma->buf + offset, nbytes);
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+/**
+ * dma_port_flash_write() - Write to non-active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of non-active region
+ * @buf: Data to write
+ * @size: Size of the buffer
+ *
+ * Writes a block of data to the non-active flash region of the switch.
+ * If the address is given as %DMA_PORT_CSS_ADDRESS the block is written
+ * using the CSS command.
+ */
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+ const void *buf, size_t size)
+{
+ unsigned int retries = DMA_PORT_RETRIES;
+ unsigned int offset;
+
+ if (address >= DMA_PORT_CSS_ADDRESS) {
+ offset = 0;
+ if (size > DMA_PORT_CSS_MAX_SIZE)
+ return -E2BIG;
+ } else {
+ offset = address & 3;
+ address = address & ~3;
+ }
+
+ do {
+ u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+ int ret;
+
+ memcpy(dma->buf + offset, buf, nbytes);
+
+		ret = dma_port_flash_write_block(dma, address, dma->buf, nbytes);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+/**
+ * dma_port_flash_update_auth() - Starts flash authenticate cycle
+ * @dma: DMA control port
+ *
+ * Starts the flash update authentication cycle. If the image in the
+ * non-active area was valid, the switch starts the upgrade process
+ * where the active and non-active areas get swapped in the end. The
+ * caller should call dma_port_flash_update_auth_status() to get the
+ * status of this command, because if the switch in question is the
+ * root switch, the Thunderbolt host controller gets reset as well.
+ */
+int dma_port_flash_update_auth(struct tb_dma_port *dma)
+{
+ u32 in;
+
+ in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
+ in |= MAIL_IN_OP_REQUEST;
+
+ return dma_port_request(dma, in, 150);
+}
+
+/**
+ * dma_port_flash_update_auth_status() - Reads status of update auth command
+ * @dma: DMA control port
+ * @status: Status code of the operation
+ *
+ * The function checks if there is a status available from the last
+ * update auth command. Returns %0 if there is no status and no further
+ * action is required. If there is a status, %1 is returned instead and
+ * @status holds the failure code.
+ *
+ * Negative return means there was an error reading status from the
+ * switch.
+ */
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 out, cmd;
+ int ret;
+
+ ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+ dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Check if the status relates to flash update auth */
+ cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
+ if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
+ if (status)
+ *status = out & MAIL_OUT_STATUS_MASK;
+
+ /* Reset is needed in any case */
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * dma_port_power_cycle() - Power cycles the switch
+ * @dma: DMA control port
+ *
+ * Triggers power cycle to the switch.
+ */
+int dma_port_power_cycle(struct tb_dma_port *dma)
+{
+ u32 in;
+
+ in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
+ in |= MAIL_IN_OP_REQUEST;
+
+ return dma_port_request(dma, in, 150);
+}
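Taken together, the mailbox helpers above support an NVM upgrade cycle along
these lines (a sketch only; image validation, the CSS header write and error
recovery are elided):

	/* Hypothetical upgrade flow built from the helpers in this file */
	static int example_nvm_upgrade(struct tb_dma_port *dma,
				       const void *image, size_t size)
	{
		u32 status;
		int ret;

		/* 1. Stage the image in the non-active flash region */
		ret = dma_port_flash_write(dma, 0, image, size);
		if (ret)
			return ret;

		/* 2. Ask the switch to authenticate and swap regions */
		ret = dma_port_flash_update_auth(dma);
		if (ret)
			return ret;

		/* 3. Once the switch is back, see how it went */
		ret = dma_port_flash_update_auth_status(dma, &status);
		if (ret < 0)
			return ret;
		if (ret == 1 && status)
			return -EIO;	/* @status holds the failure code */

		return 0;
	}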
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
new file mode 100644
index 000000000000..c4a69e0fbff7
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.h
@@ -0,0 +1,34 @@
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DMA_PORT_H_
+#define DMA_PORT_H_
+
+#include "tb.h"
+
+struct tb_switch;
+struct tb_dma_port;
+
+#define DMA_PORT_CSS_ADDRESS 0x3fffff
+#define DMA_PORT_CSS_MAX_SIZE SZ_128
+
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw);
+void dma_port_free(struct tb_dma_port *dma);
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+ void *buf, size_t size);
+int dma_port_flash_update_auth(struct tb_dma_port *dma);
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status);
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+ const void *buf, size_t size);
+int dma_port_power_cycle(struct tb_dma_port *dma);
+
+#endif
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
new file mode 100644
index 000000000000..9f2dcd48974d
--- /dev/null
+++ b/drivers/thunderbolt/domain.c
@@ -0,0 +1,456 @@
+/*
+ * Thunderbolt bus support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <crypto/hash.h>
+
+#include "tb.h"
+
+static DEFINE_IDA(tb_domain_ida);
+
+static const char * const tb_security_names[] = {
+ [TB_SECURITY_NONE] = "none",
+ [TB_SECURITY_USER] = "user",
+ [TB_SECURITY_SECURE] = "secure",
+ [TB_SECURITY_DPONLY] = "dponly",
+};
+
+static ssize_t security_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb *tb = container_of(dev, struct tb, dev);
+
+ return sprintf(buf, "%s\n", tb_security_names[tb->security_level]);
+}
+static DEVICE_ATTR_RO(security);
+
+static struct attribute *domain_attrs[] = {
+ &dev_attr_security.attr,
+ NULL,
+};
+
+static struct attribute_group domain_attr_group = {
+ .attrs = domain_attrs,
+};
+
+static const struct attribute_group *domain_attr_groups[] = {
+ &domain_attr_group,
+ NULL,
+};
+
+struct bus_type tb_bus_type = {
+ .name = "thunderbolt",
+};
+
+static void tb_domain_release(struct device *dev)
+{
+ struct tb *tb = container_of(dev, struct tb, dev);
+
+ tb_ctl_free(tb->ctl);
+ destroy_workqueue(tb->wq);
+ ida_simple_remove(&tb_domain_ida, tb->index);
+ mutex_destroy(&tb->lock);
+ kfree(tb);
+}
+
+struct device_type tb_domain_type = {
+ .name = "thunderbolt_domain",
+ .release = tb_domain_release,
+};
+
+/**
+ * tb_domain_alloc() - Allocate a domain
+ * @nhi: Pointer to the host controller
+ * @privsize: Size of the connection manager private data
+ *
+ * Allocates and initializes a new Thunderbolt domain. Connection
+ * managers are expected to call this and then fill in @cm_ops
+ * accordingly.
+ *
+ * Call tb_domain_put() to release the domain before it has been added
+ * to the system.
+ *
+ * Return: allocated domain structure or %NULL in case of error
+ */
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
+{
+ struct tb *tb;
+
+ /*
+	 * Make sure the structure sizes match what the hardware
+	 * expects because bit-fields are being used.
+ */
+ BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
+ BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
+ BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+
+ tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
+ if (!tb)
+ return NULL;
+
+ tb->nhi = nhi;
+ mutex_init(&tb->lock);
+
+ tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+ if (tb->index < 0)
+ goto err_free;
+
+ tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
+ if (!tb->wq)
+ goto err_remove_ida;
+
+ tb->dev.parent = &nhi->pdev->dev;
+ tb->dev.bus = &tb_bus_type;
+ tb->dev.type = &tb_domain_type;
+ tb->dev.groups = domain_attr_groups;
+ dev_set_name(&tb->dev, "domain%d", tb->index);
+ device_initialize(&tb->dev);
+
+ return tb;
+
+err_remove_ida:
+ ida_simple_remove(&tb_domain_ida, tb->index);
+err_free:
+ kfree(tb);
+
+ return NULL;
+}
+
+static void tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ struct tb *tb = data;
+
+ if (!tb->cm_ops->handle_event) {
+ tb_warn(tb, "domain does not have event handler\n");
+ return;
+ }
+
+ tb->cm_ops->handle_event(tb, type, buf, size);
+}
+
+/**
+ * tb_domain_add() - Add domain to the system
+ * @tb: Domain to add
+ *
+ * Starts the domain and adds it to the system. Hotplugging devices will
+ * work after this function has returned successfully. In order to
+ * remove and release the domain after this function has been called,
+ * call tb_domain_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_domain_add(struct tb *tb)
+{
+ int ret;
+
+ if (WARN_ON(!tb->cm_ops))
+ return -EINVAL;
+
+ mutex_lock(&tb->lock);
+
+ tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
+ if (!tb->ctl) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ /*
+ * tb_schedule_hotplug_handler may be called as soon as the config
+	 * channel is started. That's why we have to hold the lock here.
+ */
+ tb_ctl_start(tb->ctl);
+
+ if (tb->cm_ops->driver_ready) {
+ ret = tb->cm_ops->driver_ready(tb);
+ if (ret)
+ goto err_ctl_stop;
+ }
+
+ ret = device_add(&tb->dev);
+ if (ret)
+ goto err_ctl_stop;
+
+ /* Start the domain */
+ if (tb->cm_ops->start) {
+ ret = tb->cm_ops->start(tb);
+ if (ret)
+ goto err_domain_del;
+ }
+
+ /* This starts event processing */
+ mutex_unlock(&tb->lock);
+
+ return 0;
+
+err_domain_del:
+ device_del(&tb->dev);
+err_ctl_stop:
+ tb_ctl_stop(tb->ctl);
+err_unlock:
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
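For orientation, a connection manager drives a domain roughly as follows (a
sketch; struct my_cm and my_cm_ops are hypothetical, the calls are the ones
defined in this file):

	struct tb *tb;
	int ret;

	tb = tb_domain_alloc(nhi, sizeof(struct my_cm));
	if (!tb)
		return -ENOMEM;

	tb->cm_ops = &my_cm_ops;	/* must be set before tb_domain_add() */

	ret = tb_domain_add(tb);
	if (ret) {
		tb_domain_put(tb);	/* per the tb_domain_alloc() kernel-doc */
		return ret;
	}

	/* ... and on teardown: */
	tb_domain_remove(tb);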
+
+/**
+ * tb_domain_remove() - Removes and releases a domain
+ * @tb: Domain to remove
+ *
+ * Stops the domain, removes it from the system and releases all
+ * resources once the last reference has been released.
+ */
+void tb_domain_remove(struct tb *tb)
+{
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->stop)
+ tb->cm_ops->stop(tb);
+ /* Stop the domain control traffic */
+ tb_ctl_stop(tb->ctl);
+ mutex_unlock(&tb->lock);
+
+ flush_workqueue(tb->wq);
+ device_unregister(&tb->dev);
+}
+
+/**
+ * tb_domain_suspend_noirq() - Suspend a domain
+ * @tb: Domain to suspend
+ *
+ * Suspends all devices in the domain and stops the control channel.
+ */
+int tb_domain_suspend_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ /*
+ * The control channel interrupt is left enabled during suspend
+ * and taking the lock here prevents any events happening before
+ * we actually have stopped the domain and the control channel.
+ */
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->suspend_noirq)
+ ret = tb->cm_ops->suspend_noirq(tb);
+ if (!ret)
+ tb_ctl_stop(tb->ctl);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+/**
+ * tb_domain_resume_noirq() - Resume a domain
+ * @tb: Domain to resume
+ *
+ * Re-starts the control channel and resumes all devices connected to
+ * the domain.
+ */
+int tb_domain_resume_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ mutex_lock(&tb->lock);
+ tb_ctl_start(tb->ctl);
+ if (tb->cm_ops->resume_noirq)
+ ret = tb->cm_ops->resume_noirq(tb);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+int tb_domain_suspend(struct tb *tb)
+{
+ int ret;
+
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->suspend) {
+ ret = tb->cm_ops->suspend(tb);
+ if (ret) {
+ mutex_unlock(&tb->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+
+void tb_domain_complete(struct tb *tb)
+{
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->complete)
+ tb->cm_ops->complete(tb);
+ mutex_unlock(&tb->lock);
+}
+
+/**
+ * tb_domain_approve_switch() - Approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * This will approve the switch by connection manager specific means. In
+ * case of success the connection manager will create tunnels for all
+ * supported protocols.
+ */
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+
+ if (!tb->cm_ops->approve_switch)
+ return -EPERM;
+
+ /* The parent switch must be authorized before this one */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ if (!parent_sw || !parent_sw->authorized)
+ return -EINVAL;
+
+ return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_approve_switch_key() - Approve switch and add key
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function first adds
+ * key to the switch NVM using connection manager specific means. If
+ * adding the key is successful, the switch is approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+ int ret;
+
+ if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
+ return -EPERM;
+
+ /* The parent switch must be authorized before this one */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ if (!parent_sw || !parent_sw->authorized)
+ return -EINVAL;
+
+ ret = tb->cm_ops->add_switch_key(tb, sw);
+ if (ret)
+ return ret;
+
+ return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_challenge_switch_key() - Challenge and approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function generates a
+ * random challenge and sends it to the switch. The switch responds to
+ * this and if the response matches our random challenge, the switch is
+ * approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+ u8 challenge[TB_SWITCH_KEY_SIZE];
+ u8 response[TB_SWITCH_KEY_SIZE];
+ u8 hmac[TB_SWITCH_KEY_SIZE];
+ struct tb_switch *parent_sw;
+ struct crypto_shash *tfm;
+ struct shash_desc *shash;
+ int ret;
+
+ if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
+ return -EPERM;
+
+ /* The parent switch must be authorized before this one */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ if (!parent_sw || !parent_sw->authorized)
+ return -EINVAL;
+
+ get_random_bytes(challenge, sizeof(challenge));
+ ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
+ if (ret)
+ return ret;
+
+ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
+ if (ret)
+ goto err_free_tfm;
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto err_free_tfm;
+ }
+
+ shash->tfm = tfm;
+ shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ memset(hmac, 0, sizeof(hmac));
+ ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
+ if (ret)
+ goto err_free_shash;
+
+ /* The returned HMAC must match the one we calculated */
+ if (memcmp(response, hmac, sizeof(hmac))) {
+ ret = -EKEYREJECTED;
+ goto err_free_shash;
+ }
+
+ crypto_free_shash(tfm);
+ kfree(shash);
+
+ return tb->cm_ops->approve_switch(tb, sw);
+
+err_free_shash:
+ kfree(shash);
+err_free_tfm:
+ crypto_free_shash(tfm);
+
+ return ret;
+}
+
+/**
+ * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
+ * @tb: Domain whose PCIe paths to disconnect
+ *
+ * This needs to be called in preparation for NVM upgrade of the host
+ * controller. Makes sure all PCIe paths are disconnected.
+ *
+ * Return: %0 on success and negative errno in case of error.
+ */
+int tb_domain_disconnect_pcie_paths(struct tb *tb)
+{
+ if (!tb->cm_ops->disconnect_pcie_paths)
+ return -EPERM;
+
+ return tb->cm_ops->disconnect_pcie_paths(tb);
+}
+
+int tb_domain_init(void)
+{
+ return bus_register(&tb_bus_type);
+}
+
+void tb_domain_exit(void)
+{
+ bus_unregister(&tb_bus_type);
+ ida_destroy(&tb_domain_ida);
+ tb_switch_exit();
+}
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 6392990c984d..308b6e17c88a 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -204,6 +204,11 @@ struct tb_drom_entry_header {
enum tb_drom_entry_type type:1;
} __packed;
+struct tb_drom_entry_generic {
+ struct tb_drom_entry_header header;
+ u8 data[0];
+} __packed;
+
struct tb_drom_entry_port {
/* BYTES 0-1 */
struct tb_drom_entry_header header;
@@ -276,6 +281,9 @@ int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
if (res)
return res;
+ if (drom_offset == 0)
+ return -ENODEV;
+
/* read uid */
res = tb_eeprom_read_n(sw, drom_offset, data, 9);
if (res)
@@ -283,7 +291,7 @@ int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
crc = tb_crc8(data + 1, 8);
if (crc != data[0]) {
- tb_sw_warn(sw, "uid crc8 missmatch (expected: %#x, got: %#x)\n",
+ tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
data[0], crc);
return -EIO;
}
@@ -292,25 +300,39 @@ int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
return 0;
}
-static void tb_drom_parse_port_entry(struct tb_port *port,
- struct tb_drom_entry_port *entry)
+static int tb_drom_parse_entry_generic(struct tb_switch *sw,
+ struct tb_drom_entry_header *header)
{
- port->link_nr = entry->link_nr;
- if (entry->has_dual_link_port)
- port->dual_link_port =
- &port->sw->ports[entry->dual_link_port_nr];
+ const struct tb_drom_entry_generic *entry =
+ (const struct tb_drom_entry_generic *)header;
+
+ switch (header->index) {
+ case 1:
+		/* Length includes the 2-byte header so remove it before copying */
+ sw->vendor_name = kstrndup(entry->data,
+ header->len - sizeof(*header), GFP_KERNEL);
+ if (!sw->vendor_name)
+ return -ENOMEM;
+ break;
+
+ case 2:
+ sw->device_name = kstrndup(entry->data,
+ header->len - sizeof(*header), GFP_KERNEL);
+ if (!sw->device_name)
+ return -ENOMEM;
+ break;
+ }
+
+ return 0;
}
-static int tb_drom_parse_entry(struct tb_switch *sw,
- struct tb_drom_entry_header *header)
+static int tb_drom_parse_entry_port(struct tb_switch *sw,
+ struct tb_drom_entry_header *header)
{
struct tb_port *port;
int res;
enum tb_port_type type;
- if (header->type != TB_DROM_ENTRY_PORT)
- return 0;
-
port = &sw->ports[header->index];
port->disabled = header->port_disabled;
if (port->disabled)
@@ -329,7 +351,10 @@ static int tb_drom_parse_entry(struct tb_switch *sw,
header->len, sizeof(struct tb_drom_entry_port));
return -EIO;
}
- tb_drom_parse_port_entry(port, entry);
+ port->link_nr = entry->link_nr;
+ if (entry->has_dual_link_port)
+ port->dual_link_port =
+ &port->sw->ports[entry->dual_link_port_nr];
}
return 0;
}
@@ -344,6 +369,7 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
struct tb_drom_header *header = (void *) sw->drom;
u16 pos = sizeof(*header);
u16 drom_size = header->data_len + TB_DROM_DATA_START;
+ int res;
while (pos < drom_size) {
struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
@@ -353,7 +379,16 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
return -EIO;
}
- tb_drom_parse_entry(sw, entry);
+ switch (entry->type) {
+ case TB_DROM_ENTRY_GENERIC:
+ res = tb_drom_parse_entry_generic(sw, entry);
+ break;
+ case TB_DROM_ENTRY_PORT:
+ res = tb_drom_parse_entry_port(sw, entry);
+ break;
+ }
+ if (res)
+ return res;
pos += entry->len;
}
@@ -394,6 +429,50 @@ err:
return -EINVAL;
}
+static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
+{
+ u32 drom_offset;
+ int ret;
+
+ if (!sw->dma_port)
+ return -ENODEV;
+
+ ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
+ sw->cap_plug_events + 12, 1);
+ if (ret)
+ return ret;
+
+ if (!drom_offset)
+ return -ENODEV;
+
+ ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
+ sizeof(*size));
+ if (ret)
+ return ret;
+
+ /* Size includes CRC8 + UID + CRC32 */
+ *size += 1 + 8 + 4;
+ sw->drom = kzalloc(*size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
+ if (ret)
+ goto err_free;
+
+ /*
+ * Read UID from the minimal DROM because the one in NVM is just
+ * a placeholder.
+ */
+ tb_drom_read_uid_only(sw, &sw->uid);
+ return 0;
+
+err_free:
+ kfree(sw->drom);
+ sw->drom = NULL;
+ return ret;
+}
+
/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
@@ -415,6 +494,10 @@ int tb_drom_read(struct tb_switch *sw)
if (tb_drom_copy_efi(sw, &size) == 0)
goto parse;
+ /* Non-Apple hardware has the DROM as part of NVM */
+ if (tb_drom_copy_nvm(sw, &size) == 0)
+ goto parse;
+
/*
* The root switch contains only a dummy drom (header only,
* no entries). Hardcode the configuration here.
@@ -475,17 +558,19 @@ parse:
header->uid_crc8, crc);
goto err;
}
- sw->uid = header->uid;
+ if (!sw->uid)
+ sw->uid = header->uid;
+ sw->vendor = header->vendor_id;
+ sw->device = header->model_id;
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
- "drom data crc32 mismatch (expected: %#x, got: %#x), aborting\n",
+ "drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
- goto err;
}
- if (header->device_rom_revision > 1)
+ if (header->device_rom_revision > 2)
tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
header->device_rom_revision);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
new file mode 100644
index 000000000000..8ee340290219
--- /dev/null
+++ b/drivers/thunderbolt/icm.c
@@ -0,0 +1,1089 @@
+/*
+ * Internal Thunderbolt Connection Manager. This is firmware that runs
+ * on the Thunderbolt host controller and performs most of the
+ * low-level handling.
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "ctl.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define PCIE2CIO_CMD 0x30
+#define PCIE2CIO_CMD_TIMEOUT BIT(31)
+#define PCIE2CIO_CMD_START BIT(30)
+#define PCIE2CIO_CMD_WRITE BIT(21)
+#define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19)
+#define PCIE2CIO_CMD_CS_SHIFT 19
+#define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13)
+#define PCIE2CIO_CMD_PORT_SHIFT 13
+
+#define PCIE2CIO_WRDATA 0x34
+#define PCIE2CIO_RDDATA 0x38
+
+#define PHY_PORT_CS1 0x37
+#define PHY_PORT_CS1_LINK_DISABLE BIT(14)
+#define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26)
+#define PHY_PORT_CS1_LINK_STATE_SHIFT 26
+
+#define ICM_TIMEOUT 5000 /* ms */
+#define ICM_MAX_LINK 4
+#define ICM_MAX_DEPTH 6
+
+/**
+ * struct icm - Internal connection manager private data
+ * @request_lock: Makes sure only one message is sent to the ICM at a time
+ * @rescan_work: Work used to rescan the surviving switches after resume
+ * @upstream_port: Pointer to the PCIe upstream port this host
+ *		  controller is connected to. This is only set for systems
+ * where ICM needs to be started manually
+ * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
+ * (only set when @upstream_port is not %NULL)
+ * @safe_mode: ICM is in safe mode
+ * @is_supported: Checks if we can support ICM on this controller
+ * @get_mode: Read and return the ICM firmware mode (optional)
+ * @get_route: Find a route string for given switch
+ * @device_connected: Handle device connected ICM message
+ * @device_disconnected: Handle device disconnected ICM message
+ */
+struct icm {
+ struct mutex request_lock;
+ struct delayed_work rescan_work;
+ struct pci_dev *upstream_port;
+ int vnd_cap;
+ bool safe_mode;
+ bool (*is_supported)(struct tb *tb);
+ int (*get_mode)(struct tb *tb);
+ int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+ void (*device_connected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+ void (*device_disconnected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+};
+
+struct icm_notification {
+ struct work_struct work;
+ struct icm_pkg_header *pkg;
+ struct tb *tb;
+};
+
+static inline struct tb *icm_to_tb(struct icm *icm)
+{
+ return ((void *)icm - sizeof(struct tb));
+}
+
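+/*
+ * A route string packs one port number per byte, one byte for each
+ * depth level, so the byte at offset (depth - 1) * 8 bits names the
+ * link the switch at that depth was enumerated through.
+ */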
+static inline u8 phy_port_from_route(u64 route, u8 depth)
+{
+ return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
+}
+
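+/*
+ * The two lanes of one physical port are numbered in pairs (1 and 2,
+ * 3 and 4, ...), so for example dual_link_from_link(1) == 2 and
+ * dual_link_from_link(4) == 3; link 0 means no link.
+ */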
+static inline u8 dual_link_from_link(u8 link)
+{
+ return link ? ((link - 1) ^ 0x01) + 1 : 0;
+}
+
+static inline u64 get_route(u32 route_hi, u32 route_lo)
+{
+ return (u64)route_hi << 32 | route_lo;
+}
+
+static inline bool is_apple(void)
+{
+ return dmi_match(DMI_BOARD_VENDOR, "Apple Inc.");
+}
+
+static bool icm_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ const struct icm_pkg_header *res_hdr = pkg->buffer;
+ const struct icm_pkg_header *req_hdr = req->request;
+
+ if (pkg->frame.eof != req->response_type)
+ return false;
+ if (res_hdr->code != req_hdr->code)
+ return false;
+
+ return true;
+}
+
+static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+ const struct icm_pkg_header *hdr = pkg->buffer;
+
+ if (hdr->packet_id < req->npackets) {
+ size_t offset = hdr->packet_id * req->response_size;
+
+ memcpy(req->response + offset, pkg->buffer, req->response_size);
+ }
+
+ return hdr->packet_id == hdr->total_packets - 1;
+}
+
+static int icm_request(struct tb *tb, const void *request, size_t request_size,
+ void *response, size_t response_size, size_t npackets,
+ unsigned int timeout_msec)
+{
+ struct icm *icm = tb_priv(tb);
+ int retries = 3;
+
+ do {
+ struct tb_cfg_request *req;
+ struct tb_cfg_result res;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = icm_match;
+ req->copy = icm_copy;
+ req->request = request;
+ req->request_size = request_size;
+ req->request_type = TB_CFG_PKG_ICM_CMD;
+ req->response = response;
+ req->npackets = npackets;
+ req->response_size = response_size;
+ req->response_type = TB_CFG_PKG_ICM_RESP;
+
+ mutex_lock(&icm->request_lock);
+ res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
+ mutex_unlock(&icm->request_lock);
+
+ tb_cfg_request_put(req);
+
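+		/*
+		 * Only a timeout is retried; any other result, including
+		 * a positive control channel error code (mapped to -EIO
+		 * below), is returned to the caller right away.
+		 */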
+ if (res.err != -ETIMEDOUT)
+ return res.err == 1 ? -EIO : res.err;
+
+ usleep_range(20, 50);
+ } while (retries--);
+
+ return -ETIMEDOUT;
+}
+
+static bool icm_fr_is_supported(struct tb *tb)
+{
+ return !is_apple();
+}
+
+static inline int icm_fr_get_switch_index(u32 port)
+{
+ int index;
+
+ if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
+ return 0;
+
+ index = port >> ICM_PORT_INDEX_SHIFT;
+ return index != 0xff ? index : 0;
+}
+
+static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+ struct icm_fr_pkg_get_topology_response *switches, *sw;
+ struct icm_fr_pkg_get_topology request = {
+ .hdr = { .code = ICM_GET_TOPOLOGY },
+ };
+ size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
+ int ret, index;
+ u8 i;
+
+ switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
+ if (!switches)
+ return -ENOMEM;
+
+ ret = icm_request(tb, &request, sizeof(request), switches,
+ sizeof(*switches), npackets, ICM_TIMEOUT);
+ if (ret)
+ goto err_free;
+
+ sw = &switches[0];
+ index = icm_fr_get_switch_index(sw->ports[link]);
+ if (!index) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ sw = &switches[index];
+ for (i = 1; i < depth; i++) {
+ unsigned int j;
+
+ if (!(sw->first_data & ICM_SWITCH_USED)) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
+ index = icm_fr_get_switch_index(sw->ports[j]);
+ if (index > sw->switch_index) {
+ sw = &switches[index];
+ break;
+ }
+ }
+ }
+
+ *route = get_route(sw->route_hi, sw->route_lo);
+
+err_free:
+ kfree(switches);
+ return ret;
+}
+
+static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+ struct icm_fr_pkg_approve_device request;
+ struct icm_fr_pkg_approve_device reply;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_APPROVE_DEVICE;
+ request.connection_id = sw->connection_id;
+ request.connection_key = sw->connection_key;
+
+ memset(&reply, 0, sizeof(reply));
+ /* Use larger timeout as establishing tunnels can take some time */
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, 10000);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+ tb_warn(tb, "PCIe tunnel creation failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+ struct icm_fr_pkg_add_device_key request;
+ struct icm_fr_pkg_add_device_key_response reply;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_ADD_DEVICE_KEY;
+ request.connection_id = sw->connection_id;
+ request.connection_key = sw->connection_key;
+ memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+ tb_warn(tb, "Adding key to switch failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+ const u8 *challenge, u8 *response)
+{
+ struct icm_fr_pkg_challenge_device request;
+ struct icm_fr_pkg_challenge_device_response reply;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_CHALLENGE_DEVICE;
+ request.connection_id = sw->connection_id;
+ request.connection_key = sw->connection_key;
+ memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EKEYREJECTED;
+ if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+ return -ENOKEY;
+
+ memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+ return 0;
+}
+
+static void remove_switch(struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+
+ parent_sw = tb_to_switch(sw->dev.parent);
+ tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+ tb_switch_remove(sw);
+}
+
+static void
+icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_device_connected *pkg =
+ (const struct icm_fr_event_device_connected *)hdr;
+ struct tb_switch *sw, *parent_sw;
+ struct icm *icm = tb_priv(tb);
+ bool authorized = false;
+ u8 link, depth;
+ u64 route;
+ int ret;
+
+ link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+ depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+ ICM_LINK_INFO_DEPTH_SHIFT;
+ authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+
+ ret = icm->get_route(tb, link, depth, &route);
+ if (ret) {
+ tb_err(tb, "failed to find route string for switch at %u.%u\n",
+ link, depth);
+ return;
+ }
+
+ sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+ if (sw) {
+ u8 phy_port, sw_phy_port;
+
+ parent_sw = tb_to_switch(sw->dev.parent);
+ sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
+ phy_port = phy_port_from_route(route, depth);
+
+ /*
+		 * On resume ICM will send us connected events for the
+		 * devices that are still present. However, that
+		 * information might have changed, for example because a
+		 * switch on a dual-link connection might now have been
+		 * enumerated using the other link. Make sure our
+		 * bookkeeping matches that.
+ */
+ if (sw->depth == depth && sw_phy_port == phy_port &&
+ !!sw->authorized == authorized) {
+ tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+ tb_port_at(route, parent_sw)->remote =
+ tb_upstream_port(sw);
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
+ sw->connection_id = pkg->connection_id;
+ sw->connection_key = pkg->connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->is_unplugged = false;
+ tb_switch_put(sw);
+ return;
+ }
+
+ /*
+ * User connected the same switch to another physical
+ * port or to another part of the topology. Remove the
+ * existing switch now before adding the new one.
+ */
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ /*
+	 * If the switch was not found by UUID, look for a switch on the
+	 * same physical port (taking possible link aggregation into
+	 * account) and depth. If we find one it is definitely stale, so
+	 * remove it first.
+ */
+ sw = tb_switch_find_by_link_depth(tb, link, depth);
+ if (!sw) {
+ u8 dual_link;
+
+ dual_link = dual_link_from_link(link);
+ if (dual_link)
+ sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
+ }
+ if (sw) {
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
+ if (!parent_sw) {
+ tb_err(tb, "failed to find parent switch for %u.%u\n",
+ link, depth);
+ return;
+ }
+
+ sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+ if (!sw) {
+ tb_switch_put(parent_sw);
+ return;
+ }
+
+ sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
+ sw->connection_id = pkg->connection_id;
+ sw->connection_key = pkg->connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->authorized = authorized;
+ sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+ ICM_FLAGS_SLEVEL_SHIFT;
+
+ /* Link the two switches now */
+ tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+ tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
+
+ ret = tb_switch_add(sw);
+ if (ret) {
+ tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+ tb_switch_put(sw);
+ }
+ tb_switch_put(parent_sw);
+}
+
+static void
+icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_device_disconnected *pkg =
+ (const struct icm_fr_event_device_disconnected *)hdr;
+ struct tb_switch *sw;
+ u8 link, depth;
+
+ link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+ depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+ ICM_LINK_INFO_DEPTH_SHIFT;
+
+ if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+ tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+ return;
+ }
+
+ sw = tb_switch_find_by_link_depth(tb, link, depth);
+ if (!sw) {
+ tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+ depth);
+ return;
+ }
+
+ remove_switch(sw);
+ tb_switch_put(sw);
+}
+
+static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
+{
+ struct pci_dev *parent;
+
+ parent = pci_upstream_bridge(pdev);
+ while (parent) {
+ if (!pci_is_pcie(parent))
+ return NULL;
+ if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
+ break;
+ parent = pci_upstream_bridge(parent);
+ }
+
+ if (!parent)
+ return NULL;
+
+ switch (parent->device) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ return parent;
+ }
+
+ return NULL;
+}
+
+static bool icm_ar_is_supported(struct tb *tb)
+{
+ struct pci_dev *upstream_port;
+ struct icm *icm = tb_priv(tb);
+
+ /*
+ * Starting from Alpine Ridge we can use ICM on Apple machines
+ * as well. We just need to reset and re-enable it first.
+ */
+ if (!is_apple())
+ return true;
+
+ /*
+	 * Find the upstream PCIe port in case we need to do a reset
+	 * through its vendor specific registers.
+ */
+ upstream_port = get_upstream_port(tb->nhi->pdev);
+ if (upstream_port) {
+ int cap;
+
+ cap = pci_find_ext_capability(upstream_port,
+ PCI_EXT_CAP_ID_VNDR);
+ if (cap > 0) {
+ icm->upstream_port = upstream_port;
+ icm->vnd_cap = cap;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int icm_ar_get_mode(struct tb *tb)
+{
+ struct tb_nhi *nhi = tb->nhi;
+ int retries = 5;
+ u32 val;
+
+ do {
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ if (val & REG_FW_STS_NVM_AUTH_DONE)
+ break;
+ msleep(30);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
+ return -ENODEV;
+ }
+
+ return nhi_mailbox_mode(nhi);
+}
+
+static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+ struct icm_ar_pkg_get_route_response reply;
+ struct icm_ar_pkg_get_route request = {
+ .hdr = { .code = ICM_GET_ROUTE },
+ .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ *route = get_route(reply.route_hi, reply.route_lo);
+ return 0;
+}
+
+static void icm_handle_notification(struct work_struct *work)
+{
+ struct icm_notification *n = container_of(work, typeof(*n), work);
+ struct tb *tb = n->tb;
+ struct icm *icm = tb_priv(tb);
+
+ mutex_lock(&tb->lock);
+
+ switch (n->pkg->code) {
+ case ICM_EVENT_DEVICE_CONNECTED:
+ icm->device_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_DEVICE_DISCONNECTED:
+ icm->device_disconnected(tb, n->pkg);
+ break;
+ }
+
+ mutex_unlock(&tb->lock);
+
+ kfree(n->pkg);
+ kfree(n);
+}
+
+static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ struct icm_notification *n;
+
+ n = kmalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return;
+
+ INIT_WORK(&n->work, icm_handle_notification);
+ n->pkg = kmemdup(buf, size, GFP_KERNEL);
+ n->tb = tb;
+
+ queue_work(tb->wq, &n->work);
+}
+
+static int
+__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
+{
+ struct icm_pkg_driver_ready_response reply;
+ struct icm_pkg_driver_ready request = {
+ .hdr.code = ICM_DRIVER_READY,
+ };
+ unsigned int retries = 10;
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (security_level)
+ *security_level = reply.security_level & 0xf;
+
+ /*
+	 * Wait here until the switch config space is accessible so
+	 * that we can read the root switch config successfully.
+ */
+ do {
+ struct tb_cfg_result res;
+ u32 tmp;
+
+ res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
+ 0, 1, 100);
+ if (!res.err)
+ return 0;
+
+ msleep(50);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
+ u32 cmd;
+
+ do {
+ pci_read_config_dword(icm->upstream_port,
+ icm->vnd_cap + PCIE2CIO_CMD, &cmd);
+ if (!(cmd & PCIE2CIO_CMD_START)) {
+ if (cmd & PCIE2CIO_CMD_TIMEOUT)
+ break;
+ return 0;
+ }
+
+ msleep(50);
+ } while (time_before(jiffies, end));
+
+ return -ETIMEDOUT;
+}
+
+static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
+ unsigned int port, unsigned int index, u32 *data)
+{
+ struct pci_dev *pdev = icm->upstream_port;
+ int ret, vnd_cap = icm->vnd_cap;
+ u32 cmd;
+
+ cmd = index;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_START;
+ pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+ ret = pci2cio_wait_completion(icm, 5000);
+ if (ret)
+ return ret;
+
+ pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
+ return 0;
+}
+
+static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
+ unsigned int port, unsigned int index, u32 data)
+{
+ struct pci_dev *pdev = icm->upstream_port;
+ int vnd_cap = icm->vnd_cap;
+ u32 cmd;
+
+ pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
+
+ cmd = index;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+ pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+ return pci2cio_wait_completion(icm, 5000);
+}
+
+static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
+{
+ struct icm *icm = tb_priv(tb);
+ u32 val;
+
+ /* Put ARC to wait for CIO reset event to happen */
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ val |= REG_FW_STS_CIO_RESET_REQ;
+ iowrite32(val, nhi->iobase + REG_FW_STS);
+
+ /* Re-start ARC */
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ val |= REG_FW_STS_ICM_EN_INVERT;
+ val |= REG_FW_STS_ICM_EN_CPU;
+ iowrite32(val, nhi->iobase + REG_FW_STS);
+
+ /* Trigger CIO reset now */
+ return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
+}
+
+static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
+{
+ unsigned int retries = 10;
+ int ret;
+ u32 val;
+
+ /* Check if the ICM firmware is already running */
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ if (val & REG_FW_STS_ICM_EN)
+ return 0;
+
+ dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
+
+ ret = icm_firmware_reset(tb, nhi);
+ if (ret)
+ return ret;
+
+ /* Wait until the ICM firmware tells us it is up and running */
+ do {
+ /* Check that the ICM firmware is running */
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ if (val & REG_FW_STS_NVM_AUTH_DONE)
+ return 0;
+
+ msleep(300);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int icm_reset_phy_port(struct tb *tb, int phy_port)
+{
+ struct icm *icm = tb_priv(tb);
+ u32 state0, state1;
+ int port0, port1;
+ u32 val0, val1;
+ int ret;
+
+ if (!icm->upstream_port)
+ return 0;
+
+ if (phy_port) {
+ port0 = 3;
+ port1 = 4;
+ } else {
+ port0 = 1;
+ port1 = 2;
+ }
+
+ /*
+ * Read link status of both null ports belonging to a single
+ * physical port.
+ */
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+ if (ret)
+ return ret;
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+ if (ret)
+ return ret;
+
+ state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
+ state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+ state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
+ state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+
+ /* If they are both up we need to reset them now */
+ if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
+ return 0;
+
+ val0 |= PHY_PORT_CS1_LINK_DISABLE;
+ ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+ if (ret)
+ return ret;
+
+ val1 |= PHY_PORT_CS1_LINK_DISABLE;
+ ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+ if (ret)
+ return ret;
+
+ /* Wait a bit and then re-enable both ports */
+ usleep_range(10, 100);
+
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+ if (ret)
+ return ret;
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+ if (ret)
+ return ret;
+
+ val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
+ ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+ if (ret)
+ return ret;
+
+ val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
+ return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+}
+
+static int icm_firmware_init(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = icm_firmware_start(tb, nhi);
+ if (ret) {
+ dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
+ return ret;
+ }
+
+ if (icm->get_mode) {
+ ret = icm->get_mode(tb);
+
+ switch (ret) {
+ case NHI_FW_SAFE_MODE:
+ icm->safe_mode = true;
+ break;
+
+ case NHI_FW_CM_MODE:
+ /* Ask ICM to accept all Thunderbolt devices */
+ nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
+ break;
+
+ default:
+ tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Reset both physical ports if there is anything connected to
+ * them already.
+ */
+ ret = icm_reset_phy_port(tb, 0);
+ if (ret)
+ dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
+ ret = icm_reset_phy_port(tb, 1);
+ if (ret)
+ dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
+
+ return 0;
+}
+
+static int icm_driver_ready(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+ int ret;
+
+ ret = icm_firmware_init(tb);
+ if (ret)
+ return ret;
+
+ if (icm->safe_mode) {
+ tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
+ tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
+ tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
+ return 0;
+ }
+
+ return __icm_driver_ready(tb, &tb->security_level);
+}
+
+static int icm_suspend(struct tb *tb)
+{
+ return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+}
+
+/*
+ * Mark all switches (except the root switch) below this one unplugged.
+ * The ICM firmware will send us an updated list of switches after we
+ * have sent it the driver ready command. If a switch is not in that
+ * list it will be removed when we perform the rescan.
+ */
+static void icm_unplug_children(struct tb_switch *sw)
+{
+ unsigned int i;
+
+ if (tb_route(sw))
+ sw->is_unplugged = true;
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ struct tb_port *port = &sw->ports[i];
+
+ if (tb_is_upstream_port(port))
+ continue;
+ if (!port->remote)
+ continue;
+
+ icm_unplug_children(port->remote->sw);
+ }
+}
+
+static void icm_free_unplugged_children(struct tb_switch *sw)
+{
+ unsigned int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ struct tb_port *port = &sw->ports[i];
+
+ if (tb_is_upstream_port(port))
+ continue;
+ if (!port->remote)
+ continue;
+
+ if (port->remote->sw->is_unplugged) {
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else {
+ icm_free_unplugged_children(port->remote->sw);
+ }
+ }
+}
+
+static void icm_rescan_work(struct work_struct *work)
+{
+ struct icm *icm = container_of(work, struct icm, rescan_work.work);
+ struct tb *tb = icm_to_tb(icm);
+
+ mutex_lock(&tb->lock);
+ if (tb->root_switch)
+ icm_free_unplugged_children(tb->root_switch);
+ mutex_unlock(&tb->lock);
+}
+
+static void icm_complete(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (tb->nhi->going_away)
+ return;
+
+ icm_unplug_children(tb->root_switch);
+
+ /*
+ * Now all existing children should be resumed, start events
+ * from ICM to get updated status.
+ */
+ __icm_driver_ready(tb, NULL);
+
+ /*
+	 * We do not get notifications of devices that have been
+	 * unplugged during suspend, so schedule a rescan to clean up
+	 * any that remain.
+ */
+ queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
+}
+
+static int icm_start(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+ int ret;
+
+ if (icm->safe_mode)
+ tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
+ else
+ tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+ if (!tb->root_switch)
+ return -ENODEV;
+
+ /*
+ * NVM upgrade has not been tested on Apple systems and they
+	 * don't provide images publicly either. To be on the safe side,
+	 * prevent root switch NVM upgrade on Macs for now.
+ */
+ tb->root_switch->no_nvm_upgrade = is_apple();
+
+ ret = tb_switch_add(tb->root_switch);
+ if (ret)
+ tb_switch_put(tb->root_switch);
+
+ return ret;
+}
+
+static void icm_stop(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ cancel_delayed_work(&icm->rescan_work);
+ tb_switch_remove(tb->root_switch);
+ tb->root_switch = NULL;
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+}
+
+static int icm_disconnect_pcie_paths(struct tb *tb)
+{
+ return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
+}
+
+/* Falcon Ridge and Alpine Ridge */
+static const struct tb_cm_ops icm_fr_ops = {
+ .driver_ready = icm_driver_ready,
+ .start = icm_start,
+ .stop = icm_stop,
+ .suspend = icm_suspend,
+ .complete = icm_complete,
+ .handle_event = icm_handle_event,
+ .approve_switch = icm_fr_approve_switch,
+ .add_switch_key = icm_fr_add_switch_key,
+ .challenge_switch_key = icm_fr_challenge_switch_key,
+ .disconnect_pcie_paths = icm_disconnect_pcie_paths,
+};
+
+struct tb *icm_probe(struct tb_nhi *nhi)
+{
+ struct icm *icm;
+ struct tb *tb;
+
+ tb = tb_domain_alloc(nhi, sizeof(struct icm));
+ if (!tb)
+ return NULL;
+
+ icm = tb_priv(tb);
+ INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
+ mutex_init(&icm->request_lock);
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ icm->is_supported = icm_fr_is_supported;
+ icm->get_route = icm_fr_get_route;
+ icm->device_connected = icm_fr_device_connected;
+ icm->device_disconnected = icm_fr_device_disconnected;
+ tb->cm_ops = &icm_fr_ops;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
+ icm->is_supported = icm_ar_is_supported;
+ icm->get_mode = icm_ar_get_mode;
+ icm->get_route = icm_ar_get_route;
+ icm->device_connected = icm_fr_device_connected;
+ icm->device_disconnected = icm_fr_device_disconnected;
+ tb->cm_ops = &icm_fr_ops;
+ break;
+ }
+
+ if (!icm->is_supported || !icm->is_supported(tb)) {
+ dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
+ tb_domain_put(tb);
+ return NULL;
+ }
+
+ return tb;
+}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index a8c20413dbda..05af126a2435 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -13,7 +13,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/dmi.h>
+#include <linux/delay.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -21,6 +21,14 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
+/*
+ * Minimal number of vectors when we use MSI-X. Two are for the control
+ * channel Rx/Tx and the remaining four are for cross domain DMA paths.
+ */
+#define MSIX_MIN_VECS 6
+#define MSIX_MAX_VECS 16
+
+#define NHI_MAILBOX_TIMEOUT 500 /* ms */
static int ring_interrupt_index(struct tb_ring *ring)
{
@@ -42,6 +50,37 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
int bit = ring_interrupt_index(ring) & 31;
int mask = 1 << bit;
u32 old, new;
+
+ if (ring->irq > 0) {
+ u32 step, shift, ivr, misc;
+ void __iomem *ivr_base;
+ int index;
+
+ if (ring->is_tx)
+ index = ring->hop;
+ else
+ index = ring->hop + ring->nhi->hop_count;
+
+ /*
+ * Ask the hardware to clear interrupt status bits automatically
+ * since we already know which interrupt was triggered.
+ */
+ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+ if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+ misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+ iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+ }
+
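+		/*
+		 * Each 32-bit allocation register holds eight 4-bit
+		 * vector numbers, so for example ring index 10 lands in
+		 * the second register (step 4) at bit shift 8.
+		 */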
+ ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
+ step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+ shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+ ivr = ioread32(ivr_base + step);
+ ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
+ if (active)
+ ivr |= ring->vector << shift;
+ iowrite32(ivr, ivr_base + step);
+ }
+
old = ioread32(ring->nhi->iobase + reg);
if (active)
new = old | mask;
@@ -239,8 +278,50 @@ int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
return ret;
}
+static irqreturn_t ring_msix(int irq, void *data)
+{
+ struct tb_ring *ring = data;
+
+ schedule_work(&ring->work);
+ return IRQ_HANDLED;
+}
+
+static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
+{
+ struct tb_nhi *nhi = ring->nhi;
+ unsigned long irqflags;
+ int ret;
+
+ if (!nhi->pdev->msix_enabled)
+ return 0;
+
+ ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ring->vector = ret;
+
+ ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
+ if (ring->irq < 0)
+ return ring->irq;
+
+ irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+ return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+}
+
+static void ring_release_msix(struct tb_ring *ring)
+{
+ if (ring->irq <= 0)
+ return;
+
+ free_irq(ring->irq, ring);
+ ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ring->vector = 0;
+ ring->irq = 0;
+}
+
static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
- bool transmit)
+ bool transmit, unsigned int flags)
{
struct tb_ring *ring = NULL;
dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
@@ -271,9 +352,14 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
ring->hop = hop;
ring->is_tx = transmit;
ring->size = size;
+ ring->flags = flags;
ring->head = 0;
ring->tail = 0;
ring->running = false;
+
+ if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+ goto err;
+
ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
size * sizeof(*ring->descriptors),
&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
@@ -295,14 +381,16 @@ err:
return NULL;
}
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags)
{
- return ring_alloc(nhi, hop, size, true);
+ return ring_alloc(nhi, hop, size, true, flags);
}
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags)
{
- return ring_alloc(nhi, hop, size, false);
+ return ring_alloc(nhi, hop, size, false, flags);
}
/**
@@ -314,6 +402,8 @@ void ring_start(struct tb_ring *ring)
{
mutex_lock(&ring->nhi->lock);
mutex_lock(&ring->lock);
+ if (ring->nhi->going_away)
+ goto err;
if (ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
goto err;
@@ -360,6 +450,8 @@ void ring_stop(struct tb_ring *ring)
mutex_lock(&ring->lock);
dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
RING_TYPE(ring), ring->hop);
+ if (ring->nhi->going_away)
+ goto err;
if (!ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
RING_TYPE(ring), ring->hop);
@@ -413,6 +505,8 @@ void ring_free(struct tb_ring *ring)
RING_TYPE(ring), ring->hop);
}
+ ring_release_msix(ring);
+
dma_free_coherent(&ring->nhi->pdev->dev,
ring->size * sizeof(*ring->descriptors),
ring->descriptors, ring->descriptors_dma);
@@ -428,15 +522,70 @@ void ring_free(struct tb_ring *ring)
mutex_unlock(&ring->nhi->lock);
/**
- * ring->work can no longer be scheduled (it is scheduled only by
- * nhi_interrupt_work and ring_stop). Wait for it to finish before
- * freeing the ring.
+ * ring->work can no longer be scheduled (it is scheduled only
+ * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
+ * to finish before freeing the ring.
*/
flush_work(&ring->work);
mutex_destroy(&ring->lock);
kfree(ring);
}
+/**
+ * nhi_mailbox_cmd() - Send a command through NHI mailbox
+ * @nhi: Pointer to the NHI structure
+ * @cmd: Command to send
+ * @data: Data to be sent with the command
+ *
+ * Sends mailbox command to the firmware running on NHI. Returns %0 in
+ * case of success and negative errno in case of failure.
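+ *
+ * For example, icm_suspend() sends the firmware a save-devices request
+ * with nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0).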
+ */
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
+{
+ ktime_t timeout;
+ u32 val;
+
+ iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
+
+ val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+ val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
+ val |= REG_INMAIL_OP_REQUEST | cmd;
+ iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
+
+ timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
+ do {
+ val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+ if (!(val & REG_INMAIL_OP_REQUEST))
+ break;
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ if (val & REG_INMAIL_OP_REQUEST)
+ return -ETIMEDOUT;
+ if (val & REG_INMAIL_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nhi_mailbox_mode() - Return current firmware operation mode
+ * @nhi: Pointer to the NHI structure
+ *
+ * The function reads current firmware operation mode using NHI mailbox
+ * registers and returns it to the caller.
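+ *
+ * icm_ar_get_mode(), for example, returns this value so that
+ * icm_firmware_init() can tell safe mode from connection manager mode.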
+ */
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
+ val &= REG_OUTMAIL_CMD_OPMODE_MASK;
+ val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
+
+ return (enum nhi_fw_mode)val;
+}
+
static void nhi_interrupt_work(struct work_struct *work)
{
struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
@@ -498,16 +647,40 @@ static int nhi_suspend_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
- thunderbolt_suspend(tb);
- return 0;
+
+ return tb_domain_suspend_noirq(tb);
}
static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
- thunderbolt_resume(tb);
- return 0;
+
+ /*
+	 * Check that the device is still there. It may be that the user
+	 * unplugged the last device, which causes the host controller to
+	 * go away on PCs.
+ */
+ if (!pci_device_is_present(pdev))
+ tb->nhi->going_away = true;
+
+ return tb_domain_resume_noirq(tb);
+}
+
+static int nhi_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_suspend(tb);
+}
+
+static void nhi_complete(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ tb_domain_complete(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
@@ -528,9 +701,52 @@ static void nhi_shutdown(struct tb_nhi *nhi)
* We have to release the irq before calling flush_work. Otherwise an
* already executing IRQ handler could call schedule_work again.
*/
- devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
- flush_work(&nhi->interrupt_work);
+ if (!nhi->pdev->msix_enabled) {
+ devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+ flush_work(&nhi->interrupt_work);
+ }
mutex_destroy(&nhi->lock);
+ ida_destroy(&nhi->msix_ida);
+}
+
+static int nhi_init_msi(struct tb_nhi *nhi)
+{
+ struct pci_dev *pdev = nhi->pdev;
+ int res, irq, nvec;
+
+ /* In case someone left them on. */
+ nhi_disable_interrupts(nhi);
+
+ ida_init(&nhi->msix_ida);
+
+ /*
+ * The NHI has 16 MSI-X vectors or a single MSI. We first try to
+ * get all MSI-X vectors and if we succeed, each ring will have
+	 * one MSI-X vector. If for some reason that does not work out,
+	 * we fall back to a single MSI.
+ */
+ nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
+ PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec < 0)
+ return nvec;
+
+ INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
+
+ irq = pci_irq_vector(nhi->pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ res = devm_request_irq(&pdev->dev, irq, nhi_msi,
+ IRQF_NO_SUSPEND, "thunderbolt", nhi);
+ if (res) {
+ dev_err(&pdev->dev, "request_irq failed, aborting\n");
+ return res;
+ }
+ }
+
+ return 0;
}
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -545,12 +761,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return res;
}
- res = pci_enable_msi(pdev);
- if (res) {
- dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
- return res;
- }
-
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
if (res) {
dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
@@ -568,7 +778,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (nhi->hop_count != 12 && nhi->hop_count != 32)
dev_warn(&pdev->dev, "unexpected hop count: %d\n",
nhi->hop_count);
- INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -577,12 +786,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!nhi->tx_rings || !nhi->rx_rings)
return -ENOMEM;
- nhi_disable_interrupts(nhi); /* In case someone left them on. */
- res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
- IRQF_NO_SUSPEND, /* must work during _noirq */
- "thunderbolt", nhi);
+ res = nhi_init_msi(nhi);
if (res) {
- dev_err(&pdev->dev, "request_irq failed, aborting\n");
+ dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
return res;
}
@@ -593,13 +799,24 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* magic value - clock related? */
iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
- dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
- tb = thunderbolt_alloc_and_start(nhi);
+ tb = icm_probe(nhi);
+ if (!tb)
+ tb = tb_probe(nhi);
if (!tb) {
+ dev_err(&nhi->pdev->dev,
+ "failed to determine connection manager, aborting\n");
+ return -ENODEV;
+ }
+
+ dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+
+ res = tb_domain_add(tb);
+ if (res) {
/*
* At this point the RX/TX rings might already have been
* activated. Do a proper shutdown.
*/
+ tb_domain_put(tb);
nhi_shutdown(nhi);
return -EIO;
}
@@ -612,7 +829,8 @@ static void nhi_remove(struct pci_dev *pdev)
{
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
- thunderbolt_shutdown_and_free(tb);
+
+ tb_domain_remove(tb);
nhi_shutdown(nhi);
}
@@ -629,6 +847,10 @@ static const struct dev_pm_ops nhi_pm_ops = {
* pci-tunnels stay alive.
*/
.restore_noirq = nhi_resume_noirq,
+ .suspend = nhi_suspend,
+ .freeze = nhi_suspend,
+ .poweroff = nhi_suspend,
+ .complete = nhi_complete,
};
static struct pci_device_id nhi_ids[] = {
@@ -660,6 +882,17 @@ static struct pci_device_id nhi_ids[] = {
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
+
+ /* Thunderbolt 3 */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
+
{ 0,}
};
@@ -676,14 +909,21 @@ static struct pci_driver nhi_driver = {
static int __init nhi_init(void)
{
- if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
- return -ENOSYS;
- return pci_register_driver(&nhi_driver);
+ int ret;
+
+ ret = tb_domain_init();
+ if (ret)
+ return ret;
+ ret = pci_register_driver(&nhi_driver);
+ if (ret)
+ tb_domain_exit();
+ return ret;
}
static void __exit nhi_unload(void)
{
pci_unregister_driver(&nhi_driver);
+ tb_domain_exit();
}
module_init(nhi_init);
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 317242939b31..5b5bb2c436be 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -7,45 +7,78 @@
#ifndef DSL3510_H_
#define DSL3510_H_
+#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
/**
* struct tb_nhi - thunderbolt native host interface
+ * @lock: Must be held during ring creation/destruction. Is acquired by
+ * interrupt_work when dispatching interrupts to individual rings.
+ * @pdev: Pointer to the PCI device
+ * @iobase: MMIO space of the NHI
+ * @tx_rings: All Tx rings available on this host controller
+ * @rx_rings: All Rx rings available on this host controller
+ * @msix_ida: Used to allocate MSI-X vectors for rings
+ * @going_away: The host controller device is about to disappear, so
+ *		when this flag is set, avoid touching the hardware anymore.
+ * @interrupt_work: Work scheduled to handle ring interrupt when no
+ * MSI-X is used.
+ * @hop_count: Number of rings (end point hops) supported by NHI.
*/
struct tb_nhi {
- struct mutex lock; /*
- * Must be held during ring creation/destruction.
- * Is acquired by interrupt_work when dispatching
- * interrupts to individual rings.
- **/
+ struct mutex lock;
struct pci_dev *pdev;
void __iomem *iobase;
struct tb_ring **tx_rings;
struct tb_ring **rx_rings;
+ struct ida msix_ida;
+ bool going_away;
struct work_struct interrupt_work;
- u32 hop_count; /* Number of rings (end point hops) supported by NHI. */
+ u32 hop_count;
};
/**
* struct tb_ring - thunderbolt TX or RX ring associated with a NHI
+ * @lock: Lock serializing actions to this ring. Must be acquired after
+ * nhi->lock.
+ * @nhi: Pointer to the native host controller interface
+ * @size: Size of the ring
+ * @hop: Hop (DMA channel) associated with this ring
+ * @head: Head of the ring (write next descriptor here)
+ * @tail: Tail of the ring (complete next descriptor here)
+ * @descriptors: Allocated descriptors for this ring
+ * @queue: Queue holding frames to be transferred over this ring
+ * @in_flight: Queue holding frames that are currently in flight
+ * @work: Interrupt work structure
+ * @is_tx: Is the ring Tx or Rx
+ * @running: Is the ring running
+ * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
+ * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
+ * @flags: Ring specific flags
*/
struct tb_ring {
- struct mutex lock; /* must be acquired after nhi->lock */
+ struct mutex lock;
struct tb_nhi *nhi;
int size;
int hop;
- int head; /* write next descriptor here */
- int tail; /* complete next descriptor here */
+ int head;
+ int tail;
struct ring_desc *descriptors;
dma_addr_t descriptors_dma;
struct list_head queue;
struct list_head in_flight;
struct work_struct work;
- bool is_tx:1; /* rx otherwise */
+ bool is_tx:1;
bool running:1;
+ int irq;
+ u8 vector;
+ unsigned int flags;
};
+/* Leave ring interrupt enabled on suspend */
+#define RING_FLAG_NO_SUSPEND BIT(0)
+
struct ring_frame;
typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
@@ -64,8 +97,10 @@ struct ring_frame {
#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size);
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size);
+struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags);
+struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags);
void ring_start(struct tb_ring *ring);
void ring_stop(struct tb_ring *ring);
void ring_free(struct tb_ring *ring);
@@ -111,4 +146,38 @@ static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
return __ring_enqueue(ring, frame);
}
+enum nhi_fw_mode {
+ NHI_FW_SAFE_MODE,
+ NHI_FW_AUTH_MODE,
+ NHI_FW_EP_MODE,
+ NHI_FW_CM_MODE,
+};
+
+enum nhi_mailbox_cmd {
+ NHI_MAILBOX_SAVE_DEVS = 0x05,
+ NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
+ NHI_MAILBOX_DRV_UNLOADS = 0x07,
+ NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
+};
+
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
+
+/*
+ * PCI IDs used in this driver from Win Ridge forward. There is no
+ * need for the PCI quirk anymore as we use the ICM on Apple hardware
+ * as well.
+ */
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE 0x15c0
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI 0x15d2
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE 0x15d3
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI 0x15d9
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE 0x15da
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI 0x15dc
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI 0x15dd
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI 0x15de
+
#endif
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 75cf0691e6c5..09ed574e92ff 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -95,7 +95,34 @@ struct ring_desc {
#define REG_RING_INTERRUPT_BASE 0x38200
#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+/* Interrupt Vector Allocation */
+#define REG_INT_VEC_ALLOC_BASE 0x38c40
+#define REG_INT_VEC_ALLOC_BITS 4
+#define REG_INT_VEC_ALLOC_MASK GENMASK(3, 0)
+#define REG_INT_VEC_ALLOC_REGS (32 / REG_INT_VEC_ALLOC_BITS)
+
/* The last 11 bits contain the number of hops supported by the NHI port. */
#define REG_HOP_COUNT 0x39640
+#define REG_DMA_MISC 0x39864
+#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
+
+#define REG_INMAIL_DATA 0x39900
+
+#define REG_INMAIL_CMD 0x39904
+#define REG_INMAIL_CMD_MASK GENMASK(7, 0)
+#define REG_INMAIL_ERROR BIT(30)
+#define REG_INMAIL_OP_REQUEST BIT(31)
+
+#define REG_OUTMAIL_CMD 0x3990c
+#define REG_OUTMAIL_CMD_OPMODE_SHIFT 8
+#define REG_OUTMAIL_CMD_OPMODE_MASK GENMASK(11, 8)
+
+#define REG_FW_STS 0x39944
+#define REG_FW_STS_NVM_AUTH_DONE BIT(31)
+#define REG_FW_STS_CIO_RESET_REQ BIT(30)
+#define REG_FW_STS_ICM_EN_CPU BIT(2)
+#define REG_FW_STS_ICM_EN_INVERT BIT(1)
+#define REG_FW_STS_ICM_EN BIT(0)
+
#endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index c6f30b1695a9..ab3e8f410444 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -5,10 +5,395 @@
*/
#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include "tb.h"
+/* Switch authorization from userspace is serialized by this lock */
+static DEFINE_MUTEX(switch_lock);
+
+/* Switch NVM support */
+
+#define NVM_DEVID 0x05
+#define NVM_VERSION 0x08
+#define NVM_CSS 0x10
+#define NVM_FLASH_SIZE 0x45
+
+#define NVM_MIN_SIZE SZ_32K
+#define NVM_MAX_SIZE SZ_512K
+
+static DEFINE_IDA(nvm_ida);
+
+struct nvm_auth_status {
+ struct list_head list;
+ uuid_be uuid;
+ u32 status;
+};
+
+/*
+ * Holds the NVM authentication failure status per switch. This
+ * information needs to stay around even when the switch gets power
+ * cycled, so we keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+ struct nvm_auth_status *st;
+
+ list_for_each_entry(st, &nvm_auth_status_cache, list) {
+ if (!uuid_be_cmp(st->uuid, *sw->uuid))
+ return st;
+ }
+
+ return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+ struct nvm_auth_status *st;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+ mutex_unlock(&nvm_auth_status_lock);
+
+ *status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+ struct nvm_auth_status *st;
+
+ if (WARN_ON(!sw->uuid))
+ return;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+
+ if (!st) {
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto unlock;
+
+ memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+ INIT_LIST_HEAD(&st->list);
+ list_add_tail(&st->list, &nvm_auth_status_cache);
+ }
+
+ st->status = status;
+unlock:
+ mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+ struct nvm_auth_status *st;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+ if (st) {
+ list_del(&st->list);
+ kfree(st);
+ }
+ mutex_unlock(&nvm_auth_status_lock);
+}
+
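+/*
+ * Rough layout of an NVM image as checked below (offsets are from the
+ * start of the buffer; this is a sketch based only on the fields this
+ * file reads, not a complete format description):
+ *
+ *	0x0		FARB pointer; low 24 bits give the size of the
+ *			header section (hdr_size)
+ *	NVM_CSS		CSS headers, written separately for gen < 3
+ *	hdr_size	digital section, 4k aligned; starts with its
+ *			u16 size, device ID at hdr_size + NVM_DEVID
+ */
+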
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+ unsigned int image_size, hdr_size;
+ const u8 *buf = sw->nvm->buf;
+ u16 ds_size;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ image_size = sw->nvm->buf_data_size;
+ if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+ return -EINVAL;
+
+ /*
+	 * The FARB pointer must point inside the image and the image
+	 * must at least contain the parts of the digital section we
+	 * will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ if (!sw->safe_mode) {
+ u16 device_id;
+
+ /*
+ * Make sure the device ID in the image matches the one
+ * we read from the switch config space.
+ */
+ device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+ if (device_id != sw->config.device_id)
+ return -EINVAL;
+
+ if (sw->generation < 3) {
+ /* Write CSS headers first */
+ ret = dma_port_flash_write(sw->dma_port,
+ DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+ DMA_PORT_CSS_MAX_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ /* Skip headers in the image */
+ buf += hdr_size;
+ image_size -= hdr_size;
+ }
+
+ return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+}
+
+static int nvm_authenticate_host(struct tb_switch *sw)
+{
+ int ret;
+
+ /*
+ * Root switch NVM upgrade requires that we disconnect the
+ * existing PCIe paths first (in case it is not in safe mode
+ * already).
+ */
+ if (!sw->safe_mode) {
+ ret = tb_domain_disconnect_pcie_paths(sw->tb);
+ if (ret)
+ return ret;
+ /*
+		 * The host controller goes away pretty soon after this
+		 * if everything goes well, so getting a timeout is
+		 * expected.
+ */
+ ret = dma_port_flash_update_auth(sw->dma_port);
+ return ret == -ETIMEDOUT ? 0 : ret;
+ }
+
+ /*
+	 * We can get out of safe mode by just power cycling the
+	 * switch.
+ */
+ dma_port_power_cycle(sw->dma_port);
+ return 0;
+}
+
+static int nvm_authenticate_device(struct tb_switch *sw)
+{
+ int ret, retries = 10;
+
+ ret = dma_port_flash_update_auth(sw->dma_port);
+ if (ret && ret != -ETIMEDOUT)
+ return ret;
+
+ /*
+	 * Poll here for the authentication status. It takes some time
+	 * for the device to respond (we get timeouts for a while). Once
+	 * we get a response the device needs to be power cycled in
+	 * order for the new NVM to be taken into use.
+ */
+ do {
+ u32 status;
+
+ ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+ if (ret < 0 && ret != -ETIMEDOUT)
+ return ret;
+ if (ret > 0) {
+ if (status) {
+ tb_sw_warn(sw, "failed to authenticate NVM\n");
+ nvm_set_auth_status(sw, status);
+ }
+
+ tb_sw_info(sw, "power cycling the switch now\n");
+ dma_port_power_cycle(sw->dma_port);
+ return 0;
+ }
+
+ msleep(500);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_switch *sw = priv;
+
+ return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_switch *sw = priv;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ /*
+ * Since writing the NVM image might require some special steps,
+ * for example when CSS headers are written, we cache the image
+ * locally here and handle the special cases when the user asks
+ * us to authenticate the image.
+ */
+ if (!sw->nvm->buf) {
+ sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
+ if (!sw->nvm->buf) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ }
+
+ sw->nvm->buf_data_size = offset + bytes;
+ memcpy(sw->nvm->buf + offset, val, bytes);
+
+unlock:
+ mutex_unlock(&switch_lock);
+
+ return ret;
+}
+
+static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+ size_t size, bool active)
+{
+ struct nvmem_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ if (active) {
+ config.name = "nvm_active";
+ config.reg_read = tb_switch_nvm_read;
+ } else {
+ config.name = "nvm_non_active";
+ config.reg_write = tb_switch_nvm_write;
+ }
+
+ config.id = id;
+ config.stride = 4;
+ config.word_size = 4;
+ config.size = size;
+ config.dev = &sw->dev;
+ config.owner = THIS_MODULE;
+ config.root_only = true;
+ config.priv = sw;
+
+ return nvmem_register(&config);
+}
+
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+ struct nvmem_device *nvm_dev;
+ struct tb_switch_nvm *nvm;
+ u32 val;
+ int ret;
+
+ if (!sw->dma_port)
+ return 0;
+
+ nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!nvm)
+ return -ENOMEM;
+
+ nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+
+ /*
+ * If the switch is in safe mode, the only accessible portion of
+ * the NVM is the non-active one, where userspace is expected to
+ * write a new functional NVM image.
+ */
+ if (!sw->safe_mode) {
+ u32 nvm_size, hdr_size;
+
+ ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
+ sizeof(val));
+ if (ret)
+ goto err_ida;
+
+ hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - hdr_size) / 2;
+
+ ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
+ sizeof(val));
+ if (ret)
+ goto err_ida;
+
+ nvm->major = val >> 16;
+ nvm->minor = val >> 8;
+
+ nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
+ if (IS_ERR(nvm_dev)) {
+ ret = PTR_ERR(nvm_dev);
+ goto err_ida;
+ }
+ nvm->active = nvm_dev;
+ }
+
+ nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+ if (IS_ERR(nvm_dev)) {
+ ret = PTR_ERR(nvm_dev);
+ goto err_nvm_active;
+ }
+ nvm->non_active = nvm_dev;
+
+ mutex_lock(&switch_lock);
+ sw->nvm = nvm;
+ mutex_unlock(&switch_lock);
+
+ return 0;
+
+err_nvm_active:
+ if (nvm->active)
+ nvmem_unregister(nvm->active);
+err_ida:
+ ida_simple_remove(&nvm_ida, nvm->id);
+ kfree(nvm);
+
+ return ret;
+}
+
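The size arithmetic in tb_switch_nvm_add() is terse, so here is a hedged worked example (not part of the patch; the reading that the low three bits of the NVM_FLASH_SIZE readout select a power-of-two flash size in bits is inferred from the expression itself, not from a spec quoted here).

#include <stdio.h>

#define SZ_1M	0x100000
#define SZ_16K	0x4000

int main(void)
{
	unsigned int val = 0x5;			/* example NVM_FLASH_SIZE readout */
	unsigned int hdr_size = SZ_16K;		/* generation 3 header */
	/* low three bits: 2^(20 + n) bits of flash; /8 converts to bytes */
	unsigned int nvm_size = (SZ_1M << (val & 7)) / 8;

	/* val & 7 == 5: 32 Mbit = 4 MiB; minus header, split into two portions */
	nvm_size = (nvm_size - hdr_size) / 2;
	printf("each NVM portion: %u bytes\n", nvm_size);	/* 2088960 */
	return 0;
}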
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+ struct tb_switch_nvm *nvm;
+
+ mutex_lock(&switch_lock);
+ nvm = sw->nvm;
+ sw->nvm = NULL;
+ mutex_unlock(&switch_lock);
+
+ if (!nvm)
+ return;
+
+ /* Remove authentication status in case the switch is unplugged */
+ if (!nvm->authenticating)
+ nvm_clear_auth_status(sw);
+
+ nvmem_unregister(nvm->non_active);
+ if (nvm->active)
+ nvmem_unregister(nvm->active);
+ ida_simple_remove(&nvm_ida, nvm->id);
+ vfree(nvm->buf);
+ kfree(nvm);
+}
+
/* port utility functions */
static const char *tb_port_type(struct tb_regs_port_header *port)
@@ -192,7 +577,7 @@ static int tb_init_port(struct tb_port *port)
/* Port 0 is the switch itself and has no PHY. */
if (port->config.type == TB_TYPE_PORT && port->port != 0) {
- cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY);
+ cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
if (cap > 0)
port->cap_phy = cap;
@@ -281,6 +666,9 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data;
int res;
+ if (!sw->config.enabled)
+ return 0;
+
sw->config.plug_events_delay = 0xff;
res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
if (res)
@@ -307,36 +695,361 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
sw->cap_plug_events + 1, 1);
}
+static ssize_t authorized_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
-/**
- * tb_switch_free() - free a tb_switch and all downstream switches
- */
-void tb_switch_free(struct tb_switch *sw)
+ return sprintf(buf, "%u\n", sw->authorized);
+}
+
+static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
- int i;
- /* port 0 is the switch itself and never has a remote */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_is_upstream_port(&sw->ports[i]))
- continue;
- if (sw->ports[i].remote)
- tb_switch_free(sw->ports[i].remote->sw);
- sw->ports[i].remote = NULL;
+ int ret = -EINVAL;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ if (sw->authorized)
+ goto unlock;
+
+ switch (val) {
+ /* Approve switch */
+ case 1:
+ if (sw->key)
+ ret = tb_domain_approve_switch_key(sw->tb, sw);
+ else
+ ret = tb_domain_approve_switch(sw->tb, sw);
+ break;
+
+ /* Challenge switch */
+ case 2:
+ if (sw->key)
+ ret = tb_domain_challenge_switch_key(sw->tb, sw);
+ break;
+
+ default:
+ break;
}
- if (!sw->is_unplugged)
- tb_plug_events_active(sw, false);
+ if (!ret) {
+ sw->authorized = val;
+ /* Notify userspace of the status change */
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+ }
+
+unlock:
+ mutex_unlock(&switch_lock);
+ return ret;
+}
+
+static ssize_t authorized_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ unsigned int val;
+ ssize_t ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val > 2)
+ return -EINVAL;
+
+ ret = tb_switch_set_authorized(sw, val);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(authorized);
+
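For context (a sketch, not part of the patch): userspace drives this attribute by writing 1 to approve the switch (using the stored key when one exists) or 2 to challenge it first. The device name below follows the "%u-%llx" dev_set_name() format used later in this file, but the concrete path ("0-1") is hypothetical.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device: domain 0, route 0x1 */
	const char *path = "/sys/bus/thunderbolt/devices/0-1/authorized";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "1" approves the switch; "2" would challenge it with the key */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}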
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%#x\n", sw->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ ssize_t ret;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ if (sw->key)
+ ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+ else
+ ret = sprintf(buf, "\n");
+
+ mutex_unlock(&switch_lock);
+ return ret;
+}
+
+static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ u8 key[TB_SWITCH_KEY_SIZE];
+ ssize_t ret = count;
+
+ if (count < 64)
+ return -EINVAL;
+ if (hex2bin(key, buf, sizeof(key)))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ if (sw->authorized) {
+ ret = -EBUSY;
+ } else {
+ kfree(sw->key);
+ sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
+ if (!sw->key)
+ ret = -ENOMEM;
+ }
+
+ mutex_unlock(&switch_lock);
+ return ret;
+}
+static DEVICE_ATTR_RW(key);
+
+static ssize_t nvm_authenticate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ u32 status;
+
+ nvm_get_auth_status(sw, &status);
+ return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ bool val;
+ int ret;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ /* If NVMem devices are not yet added */
+ if (!sw->nvm) {
+ ret = -EAGAIN;
+ goto exit_unlock;
+ }
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ goto exit_unlock;
+
+ /* Always clear the authentication status */
+ nvm_clear_auth_status(sw);
+
+ if (val) {
+ ret = nvm_validate_and_write(sw);
+ if (ret)
+ goto exit_unlock;
+
+ sw->nvm->authenticating = true;
+
+ if (!tb_route(sw))
+ ret = nvm_authenticate_host(sw);
+ else
+ ret = nvm_authenticate_device(sw);
+ }
+
+exit_unlock:
+ mutex_unlock(&switch_lock);
+
+ if (ret)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
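Together with the non-active NVMem device registered by register_nvmem() above, the intended upgrade flow from userspace is: stage the image into the non-active portion, then write 1 here to trigger nvm_validate_and_write() and authentication. A minimal sketch, assuming the nvmem core exposes the device as nvm_non_active0 with a binary nvmem file and that the switch is named 0-1; both paths are illustrative.

#include <fcntl.h>
#include <unistd.h>

static int copy_fd(int in, int out)
{
	char buf[4096];
	ssize_t n;

	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n)
			return -1;
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Illustrative paths; real names depend on the nvmem id and route */
	int in = open("firmware.nvm", O_RDONLY);
	int out = open("/sys/bus/nvmem/devices/nvm_non_active0/nvmem",
		       O_WRONLY);
	int fd;

	if (in < 0 || out < 0 || copy_fd(in, out) < 0)
		return 1;
	close(in);
	close(out);

	fd = open("/sys/bus/thunderbolt/devices/0-1/nvm_authenticate",
		  O_WRONLY);
	if (fd < 0)
		return 1;
	/* Triggers nvm_validate_and_write() and the authentication above */
	if (write(fd, "1", 1) != 1)
		return 1;
	close(fd);
	return 0;
}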
+static ssize_t nvm_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&switch_lock))
+ return -ERESTARTSYS;
+
+ if (sw->safe_mode)
+ ret = -ENODATA;
+ else if (!sw->nvm)
+ ret = -EAGAIN;
+ else
+ ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+ mutex_unlock(&switch_lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%#x\n", sw->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%pUb\n", sw->uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *switch_attrs[] = {
+ &dev_attr_authorized.attr,
+ &dev_attr_device.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_key.attr,
+ &dev_attr_nvm_authenticate.attr,
+ &dev_attr_nvm_version.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_vendor_name.attr,
+ &dev_attr_unique_id.attr,
+ NULL,
+};
+
+static umode_t switch_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ if (attr == &dev_attr_key.attr) {
+ if (tb_route(sw) &&
+ sw->tb->security_level == TB_SECURITY_SECURE &&
+ sw->security_level == TB_SECURITY_SECURE)
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_nvm_authenticate.attr ||
+ attr == &dev_attr_nvm_version.attr) {
+ if (sw->dma_port)
+ return attr->mode;
+ return 0;
+ }
+
+ return sw->safe_mode ? 0 : attr->mode;
+}
+
+static struct attribute_group switch_group = {
+ .is_visible = switch_attr_is_visible,
+ .attrs = switch_attrs,
+};
+
+static const struct attribute_group *switch_groups[] = {
+ &switch_group,
+ NULL,
+};
+
+static void tb_switch_release(struct device *dev)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ dma_port_free(sw->dma_port);
+
+ kfree(sw->uuid);
+ kfree(sw->device_name);
+ kfree(sw->vendor_name);
kfree(sw->ports);
kfree(sw->drom);
+ kfree(sw->key);
kfree(sw);
}
+struct device_type tb_switch_type = {
+ .name = "thunderbolt_device",
+ .release = tb_switch_release,
+};
+
+static int tb_switch_get_generation(struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+ case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
+ return 1;
+
+ case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+ return 2;
+
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ return 3;
+
+ default:
+ /*
+ * For unknown switches assume generation to be 1 to be
+ * on the safe side.
+ */
+ tb_sw_warn(sw, "unsupported switch device id %#x\n",
+ sw->config.device_id);
+ return 1;
+ }
+}
+
/**
- * tb_switch_alloc() - allocate and initialize a switch
+ * tb_switch_alloc() - allocate a switch
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
*
- * Return: Returns a NULL on failure.
+ * Allocates and initializes a switch. Will not upload configuration to
+ * the switch. For that you need to call tb_switch_configure()
+ * separately. The returned switch should be released by calling
+ * tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
*/
-struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+ u64 route)
{
int i;
int cap;
@@ -351,11 +1064,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
sw->tb = tb;
if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
- goto err;
- tb_info(tb,
- "initializing Switch at %#llx (depth: %d, up port: %d)\n",
- route, tb_route_length(route), upstream_port);
- tb_info(tb, "old switch config:\n");
+ goto err_free_sw_ports;
+
+ tb_info(tb, "current switch config:\n");
tb_dump_switch(tb, &sw->config);
/* configure switch */
@@ -363,30 +1074,13 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
sw->config.depth = tb_route_length(route);
sw->config.route_lo = route;
sw->config.route_hi = route >> 32;
- sw->config.enabled = 1;
- /* from here on we may use the tb_sw_* functions & macros */
-
- if (sw->config.vendor_id != 0x8086)
- tb_sw_warn(sw, "unknown switch vendor id %#x\n",
- sw->config.vendor_id);
-
- if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
- sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
- sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
- sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
- sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
- tb_sw_warn(sw, "unsupported switch device id %#x\n",
- sw->config.device_id);
-
- /* upload configuration */
- if (tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3))
- goto err;
+ sw->config.enabled = 0;
/* initialize ports */
sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
GFP_KERNEL);
if (!sw->ports)
- goto err;
+ goto err_free_sw_ports;
for (i = 0; i <= sw->config.max_port_number; i++) {
/* minimum setup for tb_find_cap and tb_drom_read to work */
@@ -394,41 +1088,286 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
sw->ports[i].port = i;
}
- cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);
+ sw->generation = tb_switch_get_generation(sw);
+
+ cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
if (cap < 0) {
- tb_sw_warn(sw, "cannot find TB_CAP_PLUG_EVENTS aborting\n");
- goto err;
+ tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+ goto err_free_sw_ports;
}
sw->cap_plug_events = cap;
- /* read drom */
- if (tb_drom_read(sw))
- tb_sw_warn(sw, "tb_eeprom_read_rom failed, continuing\n");
- tb_sw_info(sw, "uid: %#llx\n", sw->uid);
-
- for (i = 0; i <= sw->config.max_port_number; i++) {
- if (sw->ports[i].disabled) {
- tb_port_info(&sw->ports[i], "disabled by eeprom\n");
- continue;
- }
- if (tb_init_port(&sw->ports[i]))
- goto err;
- }
-
- /* TODO: I2C, IECS, link controller */
+ /* Root switch is always authorized */
+ if (!route)
+ sw->authorized = true;
- if (tb_plug_events_active(sw, true))
- goto err;
+ device_initialize(&sw->dev);
+ sw->dev.parent = parent;
+ sw->dev.bus = &tb_bus_type;
+ sw->dev.type = &tb_switch_type;
+ sw->dev.groups = switch_groups;
+ dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
return sw;
-err:
+
+err_free_sw_ports:
kfree(sw->ports);
- kfree(sw->drom);
kfree(sw);
+
return NULL;
}
/**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. Such a switch lacks pretty much
+ * all capabilities except the DMA configuration port, until it is
+ * flashed with a valid NVM image.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+ struct tb_switch *sw;
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return NULL;
+
+ sw->tb = tb;
+ sw->config.depth = tb_route_length(route);
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
+ sw->safe_mode = true;
+
+ device_initialize(&sw->dev);
+ sw->dev.parent = parent;
+ sw->dev.bus = &tb_bus_type;
+ sw->dev.type = &tb_switch_type;
+ sw->dev.groups = switch_groups;
+ dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+ return sw;
+}
+
+/**
+ * tb_switch_configure() - Uploads configuration to the switch
+ * @sw: Switch to configure
+ *
+ * Call this function before the switch is added to the system. It
+ * uploads the configuration to the switch and makes it available for
+ * the connection manager to use.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_configure(struct tb_switch *sw)
+{
+ struct tb *tb = sw->tb;
+ u64 route;
+ int ret;
+
+ route = tb_route(sw);
+ tb_info(tb,
+ "initializing Switch at %#llx (depth: %d, up port: %d)\n",
+ route, tb_route_length(route), sw->config.upstream_port_number);
+
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+ tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+ sw->config.vendor_id);
+
+ sw->config.enabled = 1;
+
+ /* upload configuration */
+ ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
+ if (ret)
+ return ret;
+
+ return tb_plug_events_active(sw, true);
+}
+
+static void tb_switch_set_uuid(struct tb_switch *sw)
+{
+ u32 uuid[4];
+ int cap;
+
+ if (sw->uuid)
+ return;
+
+ /*
+ * Newer controllers include a fused UUID as part of the link
+ * controller specific registers.
+ */
+ cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
+ if (cap > 0) {
+ tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
+ } else {
+ /*
+ * The ICM generates the UUID based on the UID and fills
+ * the upper two words with ones. This does not strictly
+ * follow the UUID format, but we want to be compatible
+ * with it, so we do the same here.
+ */
+ uuid[0] = sw->uid & 0xffffffff;
+ uuid[1] = (sw->uid >> 32) & 0xffffffff;
+ uuid[2] = 0xffffffff;
+ uuid[3] = 0xffffffff;
+ }
+
+ sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+}
+
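A standalone illustration of the fallback packing (example UID only; not part of the patch):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t uid = 0x0123456789abcdefULL;	/* example switch UID */
	uint32_t uuid[4];

	/* Same packing as tb_switch_set_uuid() when no fused UUID exists */
	uuid[0] = uid & 0xffffffff;
	uuid[1] = (uid >> 32) & 0xffffffff;
	uuid[2] = 0xffffffff;
	uuid[3] = 0xffffffff;

	/* prints: 89abcdef 01234567 ffffffff ffffffff */
	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       uuid[0], uuid[1], uuid[2], uuid[3]);
	return 0;
}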
+static int tb_switch_add_dma_port(struct tb_switch *sw)
+{
+ u32 status;
+ int ret;
+
+ switch (sw->generation) {
+ case 3:
+ break;
+
+ case 2:
+ /* Only root switch can be upgraded */
+ if (tb_route(sw))
+ return 0;
+ break;
+
+ default:
+ /*
+ * DMA port is the only thing available when the switch
+ * is in safe mode.
+ */
+ if (!sw->safe_mode)
+ return 0;
+ break;
+ }
+
+ if (sw->no_nvm_upgrade)
+ return 0;
+
+ sw->dma_port = dma_port_alloc(sw);
+ if (!sw->dma_port)
+ return 0;
+
+ /*
+ * Check the status of the previous flash authentication. If there
+ * is a failure, we need to power cycle the switch in any case to
+ * make it functional again.
+ */
+ ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+ if (ret <= 0)
+ return ret;
+
+ if (status) {
+ tb_sw_info(sw, "switch flash authentication failed\n");
+ tb_switch_set_uuid(sw);
+ nvm_set_auth_status(sw, status);
+ }
+
+ tb_sw_info(sw, "power cycling the switch now\n");
+ dma_port_power_cycle(sw->dma_port);
+
+ /*
+ * We return an error here, which causes adding the switch to fail.
+ * It should appear back once the power cycle is complete.
+ */
+ return -ESHUTDOWN;
+}
+
+/**
+ * tb_switch_add() - Add a switch to the domain
+ * @sw: Switch to add
+ *
+ * This is the last step in adding a switch to the domain. It reads
+ * identification information from the DROM and initializes the ports
+ * so that they can be used to connect other switches. The switch is
+ * exposed to userspace when this function returns successfully. To
+ * remove and release the switch, call tb_switch_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_add(struct tb_switch *sw)
+{
+ int i, ret;
+
+ /*
+ * Initialize the DMA control port now, before we read the DROM.
+ * Recent host controllers have a more complete DROM in NVM that
+ * includes vendor and model identification strings, which we then
+ * expose to userspace. The NVM can be accessed through the DMA
+ * configuration based mailbox.
+ */
+ ret = tb_switch_add_dma_port(sw);
+ if (ret)
+ return ret;
+
+ if (!sw->safe_mode) {
+ /* read drom */
+ ret = tb_drom_read(sw);
+ if (ret) {
+ tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+ return ret;
+ }
+ tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+
+ tb_switch_set_uuid(sw);
+
+ for (i = 0; i <= sw->config.max_port_number; i++) {
+ if (sw->ports[i].disabled) {
+ tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+ continue;
+ }
+ ret = tb_init_port(&sw->ports[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = device_add(&sw->dev);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_nvm_add(sw);
+ if (ret)
+ device_del(&sw->dev);
+
+ return ret;
+}
+
+/**
+ * tb_switch_remove() - Remove and release a switch
+ * @sw: Switch to remove
+ *
+ * This will remove the switch from the domain and release it after the
+ * last reference to it is dropped. If there are switches connected below
+ * this switch, they will be removed as well.
+ */
+void tb_switch_remove(struct tb_switch *sw)
+{
+ int i;
+
+ /* port 0 is the switch itself and never has a remote */
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ if (tb_is_upstream_port(&sw->ports[i]))
+ continue;
+ if (sw->ports[i].remote)
+ tb_switch_remove(sw->ports[i].remote->sw);
+ sw->ports[i].remote = NULL;
+ }
+
+ if (!sw->is_unplugged)
+ tb_plug_events_active(sw, false);
+
+ tb_switch_nvm_remove(sw);
+ device_unregister(&sw->dev);
+}
+
+/**
* tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
@@ -452,19 +1391,26 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
int tb_switch_resume(struct tb_switch *sw)
{
int i, err;
- u64 uid;
tb_sw_info(sw, "resuming switch\n");
- err = tb_drom_read_uid_only(sw, &uid);
- if (err) {
- tb_sw_warn(sw, "uid read failed\n");
- return err;
- }
- if (sw != sw->tb->root_switch && sw->uid != uid) {
- tb_sw_info(sw,
- "changed while suspended (uid %#llx -> %#llx)\n",
- sw->uid, uid);
- return -ENODEV;
+ /*
+ * Check the UID of the connected switches, except for the root
+ * switch, which we assume cannot be removed.
+ */
+ if (tb_route(sw)) {
+ u64 uid;
+
+ err = tb_drom_read_uid_only(sw, &uid);
+ if (err) {
+ tb_sw_warn(sw, "uid read failed\n");
+ return err;
+ }
+ if (sw->uid != uid) {
+ tb_sw_info(sw,
+ "changed while suspended (uid %#llx -> %#llx)\n",
+ sw->uid, uid);
+ return -ENODEV;
+ }
}
/* upload configuration */
@@ -509,3 +1455,85 @@ void tb_switch_suspend(struct tb_switch *sw)
* effect?
*/
}
+
+struct tb_sw_lookup {
+ struct tb *tb;
+ u8 link;
+ u8 depth;
+ const uuid_be *uuid;
+};
+
+static int tb_switch_match(struct device *dev, void *data)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ struct tb_sw_lookup *lookup = data;
+
+ if (!sw)
+ return 0;
+ if (sw->tb != lookup->tb)
+ return 0;
+
+ if (lookup->uuid)
+ return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
+
+ /* Root switch is matched only by depth */
+ if (!lookup->depth)
+ return !sw->depth;
+
+ return sw->link == lookup->link && sw->depth == lookup->depth;
+}
+
+/**
+ * tb_switch_find_by_link_depth() - Find switch by link and depth
+ * @tb: Domain the switch belongs to
+ * @link: Link number the switch is connected to
+ * @depth: Depth of the switch in the link
+ *
+ * The returned switch has its reference count increased, so the caller
+ * needs to call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
+{
+ struct tb_sw_lookup lookup;
+ struct device *dev;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.tb = tb;
+ lookup.link = link;
+ lookup.depth = depth;
+
+ dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+ if (dev)
+ return tb_to_switch(dev);
+
+ return NULL;
+}
+
+/**
+ * tb_switch_find_by_uuid() - Find switch by UUID
+ * @tb: Domain the switch belongs to
+ * @uuid: UUID to look for
+ *
+ * The returned switch has its reference count increased, so the caller
+ * needs to call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+{
+ struct tb_sw_lookup lookup;
+ struct device *dev;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.tb = tb;
+ lookup.uuid = uuid;
+
+ dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+ if (dev)
+ return tb_to_switch(dev);
+
+ return NULL;
+}
+
+void tb_switch_exit(void)
+{
+ ida_destroy(&nvm_ida);
+}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 24b6d30c3c86..1b02ca0b6129 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -7,11 +7,24 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"
+/**
+ * struct tb_cm - Simple Thunderbolt connection manager
+ * @tunnel_list: List of active tunnels
+ * @hotplug_active: tb_handle_hotplug will stop progressing plug
+ * events and exit if this is not set (it needs to
+ * acquire the lock one more time). Used to drain wq
+ * after cfg has been paused.
+ */
+struct tb_cm {
+ struct list_head tunnel_list;
+ bool hotplug_active;
+};
/* enumeration & hot plug handling */
@@ -49,9 +62,23 @@ static void tb_scan_port(struct tb_port *port)
tb_port_WARN(port, "port already has a remote!\n");
return;
}
- sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
+ sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
+ tb_downstream_route(port));
if (!sw)
return;
+
+ if (tb_switch_configure(sw)) {
+ tb_switch_put(sw);
+ return;
+ }
+
+ sw->authorized = true;
+
+ if (tb_switch_add(sw)) {
+ tb_switch_put(sw);
+ return;
+ }
+
port->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = port;
tb_scan_switch(sw);
@@ -62,12 +89,14 @@ static void tb_scan_port(struct tb_port *port)
*/
static void tb_free_invalid_tunnels(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel;
struct tb_pci_tunnel *n;
- list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
- {
+
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
if (tb_pci_is_invalid(tunnel)) {
tb_pci_deactivate(tunnel);
+ list_del(&tunnel->list);
tb_pci_free(tunnel);
}
}
@@ -86,7 +115,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
if (!port->remote)
continue;
if (port->remote->sw->is_unplugged) {
- tb_switch_free(port->remote->sw);
+ tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else {
tb_free_unplugged_children(port->remote->sw);
@@ -121,8 +150,8 @@ static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
continue;
if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
continue;
- cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
- if (cap <= 0)
+ cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
+ if (cap < 0)
continue;
res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
if (res < 0)
@@ -149,6 +178,8 @@ static void tb_activate_pcie_devices(struct tb *tb)
struct tb_port *up_port;
struct tb_port *down_port;
struct tb_pci_tunnel *tunnel;
+ struct tb_cm *tcm = tb_priv(tb);
+
/* scan for pcie devices at depth 1*/
for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
if (tb_is_upstream_port(&tb->root_switch->ports[i]))
@@ -165,8 +196,8 @@ static void tb_activate_pcie_devices(struct tb *tb)
}
/* check whether port is already activated */
- cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
- if (cap <= 0)
+ cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
+ if (cap < 0)
continue;
if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
continue;
@@ -195,6 +226,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
tb_pci_free(tunnel);
}
+ list_add(&tunnel->list, &tcm->tunnel_list);
}
}
@@ -217,10 +249,11 @@ static void tb_handle_hotplug(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
struct tb *tb = ev->tb;
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
struct tb_port *port;
mutex_lock(&tb->lock);
- if (!tb->hotplug_active)
+ if (!tcm->hotplug_active)
goto out; /* during init, suspend or shutdown */
sw = get_switch_at_route(tb->root_switch, ev->route);
@@ -248,7 +281,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_port_info(port, "unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
- tb_switch_free(port->remote->sw);
+ tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else {
tb_port_info(port,
@@ -281,137 +314,108 @@ out:
*
* Delegates to tb_handle_hotplug.
*/
-static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
- bool unplug)
+static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
{
- struct tb *tb = data;
- struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+ const struct cfg_event_pkg *pkg = buf;
+ struct tb_hotplug_event *ev;
+ u64 route;
+
+ if (type != TB_CFG_PKG_EVENT) {
+ tb_warn(tb, "unexpected event %#x, ignoring\n", type);
+ return;
+ }
+
+ route = tb_cfg_get_route(&pkg->header);
+
+ if (tb_cfg_error(tb->ctl, route, pkg->port,
+ TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+ tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
+ pkg->port);
+ }
+
+ ev = kmalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return;
INIT_WORK(&ev->work, tb_handle_hotplug);
ev->tb = tb;
ev->route = route;
- ev->port = port;
- ev->unplug = unplug;
+ ev->port = pkg->port;
+ ev->unplug = pkg->unplug;
queue_work(tb->wq, &ev->work);
}
-/**
- * thunderbolt_shutdown_and_free() - shutdown everything
- *
- * Free all switches and the config channel.
- *
- * Used in the error path of thunderbolt_alloc_and_start.
- */
-void thunderbolt_shutdown_and_free(struct tb *tb)
+static void tb_stop(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel;
struct tb_pci_tunnel *n;
- mutex_lock(&tb->lock);
-
/* tunnels are only present after everything has been initialized */
- list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
tb_pci_deactivate(tunnel);
tb_pci_free(tunnel);
}
-
- if (tb->root_switch)
- tb_switch_free(tb->root_switch);
- tb->root_switch = NULL;
-
- if (tb->ctl) {
- tb_ctl_stop(tb->ctl);
- tb_ctl_free(tb->ctl);
- }
- tb->ctl = NULL;
- tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
-
- /* allow tb_handle_hotplug to acquire the lock */
- mutex_unlock(&tb->lock);
- if (tb->wq) {
- flush_workqueue(tb->wq);
- destroy_workqueue(tb->wq);
- tb->wq = NULL;
- }
- mutex_destroy(&tb->lock);
- kfree(tb);
+ tb_switch_remove(tb->root_switch);
+ tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
-/**
- * thunderbolt_alloc_and_start() - setup the thunderbolt bus
- *
- * Allocates a tb_cfg control channel, initializes the root switch, enables
- * plug events and activates pci devices.
- *
- * Return: Returns NULL on error.
- */
-struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
+static int tb_start(struct tb *tb)
{
- struct tb *tb;
-
- BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
- BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
- BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+ struct tb_cm *tcm = tb_priv(tb);
+ int ret;
- tb = kzalloc(sizeof(*tb), GFP_KERNEL);
- if (!tb)
- return NULL;
-
- tb->nhi = nhi;
- mutex_init(&tb->lock);
- mutex_lock(&tb->lock);
- INIT_LIST_HEAD(&tb->tunnel_list);
-
- tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
- if (!tb->wq)
- goto err_locked;
+ tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+ if (!tb->root_switch)
+ return -ENOMEM;
- tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
- if (!tb->ctl)
- goto err_locked;
/*
- * tb_schedule_hotplug_handler may be called as soon as the config
- * channel is started. Thats why we have to hold the lock here.
+ * ICM firmware upgrade needs a running firmware, which is not
+ * available in native mode, so disable firmware upgrade of the
+ * root switch.
*/
- tb_ctl_start(tb->ctl);
+ tb->root_switch->no_nvm_upgrade = true;
- tb->root_switch = tb_switch_alloc(tb, 0);
- if (!tb->root_switch)
- goto err_locked;
+ ret = tb_switch_configure(tb->root_switch);
+ if (ret) {
+ tb_switch_put(tb->root_switch);
+ return ret;
+ }
+
+ /* Announce the switch to the world */
+ ret = tb_switch_add(tb->root_switch);
+ if (ret) {
+ tb_switch_put(tb->root_switch);
+ return ret;
+ }
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
tb_activate_pcie_devices(tb);
/* Allow tb_handle_hotplug to progress events */
- tb->hotplug_active = true;
- mutex_unlock(&tb->lock);
- return tb;
-
-err_locked:
- mutex_unlock(&tb->lock);
- thunderbolt_shutdown_and_free(tb);
- return NULL;
+ tcm->hotplug_active = true;
+ return 0;
}
-void thunderbolt_suspend(struct tb *tb)
+static int tb_suspend_noirq(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+
tb_info(tb, "suspending...\n");
- mutex_lock(&tb->lock);
tb_switch_suspend(tb->root_switch);
- tb_ctl_stop(tb->ctl);
- tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
- mutex_unlock(&tb->lock);
+ tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
tb_info(tb, "suspend finished\n");
+
+ return 0;
}
-void thunderbolt_resume(struct tb *tb)
+static int tb_resume_noirq(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel, *n;
+
tb_info(tb, "resuming...\n");
- mutex_lock(&tb->lock);
- tb_ctl_start(tb->ctl);
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb, 0);
@@ -419,9 +423,9 @@ void thunderbolt_resume(struct tb *tb)
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
- list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_pci_restart(tunnel);
- if (!list_empty(&tb->tunnel_list)) {
+ if (!list_empty(&tcm->tunnel_list)) {
/*
* the pcie links need some time to get going.
* 100ms works for me...
@@ -430,7 +434,37 @@ void thunderbolt_resume(struct tb *tb)
msleep(100);
}
/* Allow tb_handle_hotplug to progress events */
- tb->hotplug_active = true;
- mutex_unlock(&tb->lock);
+ tcm->hotplug_active = true;
tb_info(tb, "resume finished\n");
+
+ return 0;
+}
+
+static const struct tb_cm_ops tb_cm_ops = {
+ .start = tb_start,
+ .stop = tb_stop,
+ .suspend_noirq = tb_suspend_noirq,
+ .resume_noirq = tb_resume_noirq,
+ .handle_event = tb_handle_event,
+};
+
+struct tb *tb_probe(struct tb_nhi *nhi)
+{
+ struct tb_cm *tcm;
+ struct tb *tb;
+
+ if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+ return NULL;
+
+ tb = tb_domain_alloc(nhi, sizeof(*tcm));
+ if (!tb)
+ return NULL;
+
+ tb->security_level = TB_SECURITY_NONE;
+ tb->cm_ops = &tb_cm_ops;
+
+ tcm = tb_priv(tb);
+ INIT_LIST_HEAD(&tcm->tunnel_list);
+
+ return tb;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 61d57ba64035..3d9f64676e58 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -7,22 +7,120 @@
#ifndef TB_H_
#define TB_H_
+#include <linux/nvmem-provider.h>
#include <linux/pci.h>
+#include <linux/uuid.h>
#include "tb_regs.h"
#include "ctl.h"
+#include "dma_port.h"
+
+/**
+ * struct tb_switch_nvm - Structure holding switch NVM information
+ * @major: Major version number of the active NVM portion
+ * @minor: Minor version number of the active NVM portion
+ * @id: Identifier used with both NVM portions
+ * @active: Active portion NVMem device
+ * @non_active: Non-active portion NVMem device
+ * @buf: Buffer where the NVM image is stored before it is written to
+ * the actual NVM flash device
+ * @buf_data_size: Number of bytes actually consumed by the new NVM
+ * image
+ * @authenticating: The switch is authenticating the new NVM
+ */
+struct tb_switch_nvm {
+ u8 major;
+ u8 minor;
+ int id;
+ struct nvmem_device *active;
+ struct nvmem_device *non_active;
+ void *buf;
+ size_t buf_data_size;
+ bool authenticating;
+};
+
+/**
+ * enum tb_security_level - Thunderbolt security level
+ * @TB_SECURITY_NONE: No security, legacy mode
+ * @TB_SECURITY_USER: User approval required at minimum
+ * @TB_SECURITY_SECURE: One time saved key required at minimum
+ * @TB_SECURITY_DPONLY: Only tunnel DisplayPort (and USB)
+ */
+enum tb_security_level {
+ TB_SECURITY_NONE,
+ TB_SECURITY_USER,
+ TB_SECURITY_SECURE,
+ TB_SECURITY_DPONLY,
+};
+
+#define TB_SWITCH_KEY_SIZE 32
+/* Each physical port contains 2 links on modern controllers */
+#define TB_SWITCH_LINKS_PER_PHY_PORT 2
/**
* struct tb_switch - a thunderbolt switch
+ * @dev: Device for the switch
+ * @config: Switch configuration
+ * @ports: Ports in this switch
+ * @dma_port: If the switch has a port supporting the DMA configuration
+ * based mailbox, this holds a pointer to it (%NULL
+ * otherwise). If set, it also means the switch has an
+ * upgradeable NVM.
+ * @tb: Pointer to the domain the switch belongs to
+ * @uid: Unique ID of the switch
+ * @uuid: UUID of the switch (or %NULL if not supported)
+ * @vendor: Vendor ID of the switch
+ * @device: Device ID of the switch
+ * @vendor_name: Name of the vendor (or %NULL if not known)
+ * @device_name: Name of the device (or %NULL if not known)
+ * @generation: Switch Thunderbolt generation
+ * @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @is_unplugged: The switch is going away
+ * @drom: DROM of the switch (%NULL if not found)
+ * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this switch
+ * @safe_mode: The switch is in safe-mode
+ * @authorized: Whether the switch is authorized by user or policy
+ * @work: Work used to automatically authorize a switch
+ * @security_level: Switch supported security level
+ * @key: Contains the key used to challenge the device or %NULL if not
+ * supported. Size of the key is %TB_SWITCH_KEY_SIZE.
+ * @connection_id: Connection ID used with ICM messaging
+ * @connection_key: Connection key used with ICM messaging
+ * @link: Root switch link this switch is connected to (ICM only)
+ * @depth: Depth in the chain where this switch is connected (ICM only)
+ *
+ * When the switch is being added to or removed from the domain (other
+ * switches), the domain lock must be held. For switch authorization the
+ * internal switch_lock is enough.
*/
struct tb_switch {
+ struct device dev;
struct tb_regs_switch_header config;
struct tb_port *ports;
+ struct tb_dma_port *dma_port;
struct tb *tb;
u64 uid;
- int cap_plug_events; /* offset, zero if not found */
- bool is_unplugged; /* unplugged, will go away */
+ uuid_be *uuid;
+ u16 vendor;
+ u16 device;
+ const char *vendor_name;
+ const char *device_name;
+ unsigned int generation;
+ int cap_plug_events;
+ bool is_unplugged;
u8 *drom;
+ struct tb_switch_nvm *nvm;
+ bool no_nvm_upgrade;
+ bool safe_mode;
+ unsigned int authorized;
+ struct work_struct work;
+ enum tb_security_level security_level;
+ u8 *key;
+ u8 connection_id;
+ u8 connection_key;
+ u8 link;
+ u8 depth;
};
/**
@@ -92,29 +190,71 @@ struct tb_path {
int path_length; /* number of hops */
};
+/**
+ * struct tb_cm_ops - Connection manager specific operations vector
+ * @driver_ready: Called right after the control channel is started. Used
+ * by the ICM to send the driver ready message to the firmware.
+ * @start: Starts the domain
+ * @stop: Stops the domain
+ * @suspend_noirq: Connection manager specific suspend_noirq
+ * @resume_noirq: Connection manager specific resume_noirq
+ * @suspend: Connection manager specific suspend
+ * @complete: Connection manager specific complete
+ * @handle_event: Handle thunderbolt event
+ * @approve_switch: Approve switch
+ * @add_switch_key: Add key to switch
+ * @challenge_switch_key: Challenge switch using key
+ * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
+ */
+struct tb_cm_ops {
+ int (*driver_ready)(struct tb *tb);
+ int (*start)(struct tb *tb);
+ void (*stop)(struct tb *tb);
+ int (*suspend_noirq)(struct tb *tb);
+ int (*resume_noirq)(struct tb *tb);
+ int (*suspend)(struct tb *tb);
+ void (*complete)(struct tb *tb);
+ void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
+ const void *buf, size_t size);
+ int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
+ int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
+ int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
+ const u8 *challenge, u8 *response);
+ int (*disconnect_pcie_paths)(struct tb *tb);
+};
/**
* struct tb - main thunderbolt bus structure
+ * @dev: Domain device
+ * @lock: Big lock. Must be held when accessing any struct
+ * tb_switch / struct tb_port.
+ * @nhi: Pointer to the NHI structure
+ * @ctl: Control channel for this domain
+ * @wq: Ordered workqueue for all domain specific work
+ * @root_switch: Root switch of this domain
+ * @cm_ops: Connection manager specific operations vector
+ * @index: Linux assigned domain number
+ * @security_level: Current security level
+ * @privdata: Private connection manager specific data
*/
struct tb {
- struct mutex lock; /*
- * Big lock. Must be held when accessing cfg or
- * any struct tb_switch / struct tb_port.
- */
+ struct device dev;
+ struct mutex lock;
struct tb_nhi *nhi;
struct tb_ctl *ctl;
- struct workqueue_struct *wq; /* ordered workqueue for plug events */
+ struct workqueue_struct *wq;
struct tb_switch *root_switch;
- struct list_head tunnel_list; /* list of active PCIe tunnels */
- bool hotplug_active; /*
- * tb_handle_hotplug will stop progressing plug
- * events and exit if this is not set (it needs to
- * acquire the lock one more time). Used to drain
- * wq after cfg has been paused.
- */
-
+ const struct tb_cm_ops *cm_ops;
+ int index;
+ enum tb_security_level security_level;
+ unsigned long privdata[0];
};
+static inline void *tb_priv(struct tb *tb)
+{
+ return (void *)tb->privdata;
+}
+
/* helper functions & macros */
/**
@@ -137,6 +277,16 @@ static inline u64 tb_route(struct tb_switch *sw)
return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}
+static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
+{
+ u8 port;
+
+ port = route >> (sw->config.depth * 8);
+ if (WARN_ON(port > sw->config.max_port_number))
+ return NULL;
+ return &sw->ports[port];
+}
+
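tb_port_at() encodes the route-string convention: one byte per hop, with the byte at position @depth selecting the downstream port of the switch at that depth. A standalone sketch of the decoding under that assumption (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Route 0x0302: port 2 at depth 0, then port 3 at depth 1 */
	uint64_t route = 0x0302;
	unsigned int depth;

	for (depth = 0; depth < 2; depth++)
		printf("depth %u -> port %u\n", depth,
		       (unsigned int)(route >> (depth * 8)) & 0xff);
	/* prints: depth 0 -> port 2, depth 1 -> port 3 */
	return 0;
}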
static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
@@ -173,7 +323,7 @@ static inline int tb_port_read(struct tb_port *port, void *buffer,
length);
}
-static inline int tb_port_write(struct tb_port *port, void *buffer,
+static inline int tb_port_write(struct tb_port *port, const void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
return tb_cfg_write(port->sw->tb->ctl,
@@ -215,25 +365,78 @@ static inline int tb_port_write(struct tb_port *port, void *buffer,
#define tb_port_info(port, fmt, arg...) \
__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+struct tb *icm_probe(struct tb_nhi *nhi);
+struct tb *tb_probe(struct tb_nhi *nhi);
+
+extern struct bus_type tb_bus_type;
+extern struct device_type tb_domain_type;
+extern struct device_type tb_switch_type;
+
+int tb_domain_init(void);
+void tb_domain_exit(void);
+void tb_switch_exit(void);
+
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
+int tb_domain_add(struct tb *tb);
+void tb_domain_remove(struct tb *tb);
+int tb_domain_suspend_noirq(struct tb *tb);
+int tb_domain_resume_noirq(struct tb *tb);
+int tb_domain_suspend(struct tb *tb);
+void tb_domain_complete(struct tb *tb);
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_disconnect_pcie_paths(struct tb *tb);
+
+static inline void tb_domain_put(struct tb *tb)
+{
+ put_device(&tb->dev);
+}
-struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
-void thunderbolt_shutdown_and_free(struct tb *tb);
-void thunderbolt_suspend(struct tb *tb);
-void thunderbolt_resume(struct tb *tb);
-
-struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
-void tb_switch_free(struct tb_switch *sw);
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+ u64 route);
+struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
+ struct device *parent, u64 route);
+int tb_switch_configure(struct tb_switch *sw);
+int tb_switch_add(struct tb_switch *sw);
+void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+
+static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
+{
+ return (link - 1) / TB_SWITCH_LINKS_PER_PHY_PORT;
+}
+
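Concretely, link numbers pair up onto physical ports. A standalone check of the mapping (the helper name is illustrative):

#include <assert.h>

#define TB_SWITCH_LINKS_PER_PHY_PORT	2

static unsigned int phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_SWITCH_LINKS_PER_PHY_PORT;
}

int main(void)
{
	/* Links are numbered from 1; each physical port carries two links */
	assert(phy_port_from_link(1) == 0);
	assert(phy_port_from_link(2) == 0);
	assert(phy_port_from_link(3) == 1);
	assert(phy_port_from_link(4) == 1);
	return 0;
}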
+static inline void tb_switch_put(struct tb_switch *sw)
+{
+ put_device(&sw->dev);
+}
+
+static inline bool tb_is_switch(const struct device *dev)
+{
+ return dev->type == &tb_switch_type;
+}
+
+static inline struct tb_switch *tb_to_switch(struct device *dev)
+{
+ if (tb_is_switch(dev))
+ return container_of(dev, struct tb_switch, dev);
+ return NULL;
+}
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
-int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap);
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
void tb_path_free(struct tb_path *path);
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
new file mode 100644
index 000000000000..85b6d33c0919
--- /dev/null
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -0,0 +1,260 @@
+/*
+ * Thunderbolt control channel messages
+ *
+ * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2017, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TB_MSGS
+#define _TB_MSGS
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+enum tb_cfg_pkg_type {
+ TB_CFG_PKG_READ = 1,
+ TB_CFG_PKG_WRITE = 2,
+ TB_CFG_PKG_ERROR = 3,
+ TB_CFG_PKG_NOTIFY_ACK = 4,
+ TB_CFG_PKG_EVENT = 5,
+ TB_CFG_PKG_XDOMAIN_REQ = 6,
+ TB_CFG_PKG_XDOMAIN_RESP = 7,
+ TB_CFG_PKG_OVERRIDE = 8,
+ TB_CFG_PKG_RESET = 9,
+ TB_CFG_PKG_ICM_EVENT = 10,
+ TB_CFG_PKG_ICM_CMD = 11,
+ TB_CFG_PKG_ICM_RESP = 12,
+ TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
+
+};
+
+enum tb_cfg_space {
+ TB_CFG_HOPS = 0,
+ TB_CFG_PORT = 1,
+ TB_CFG_SWITCH = 2,
+ TB_CFG_COUNTERS = 3,
+};
+
+enum tb_cfg_error {
+ TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
+ TB_CFG_ERROR_LINK_ERROR = 1,
+ TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
+ TB_CFG_ERROR_NO_SUCH_PORT = 4,
+ TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* sent as reply to TB_CFG_PKG_EVENT */
+ TB_CFG_ERROR_LOOP = 8,
+ TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
+ TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
+};
+
+/* common header */
+struct tb_cfg_header {
+ u32 route_hi:22;
+ u32 unknown:10; /* highest order bit is set on replies */
+ u32 route_lo;
+} __packed;
+
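The 64-bit route used throughout is these two fields concatenated, as tb_cfg_get_route() does in tb.c above. A minimal sketch (not part of the patch), assuming the 22-bit route_hi field has already been extracted from the raw dword:

#include <stdint.h>

/* The top bits of the first header dword carry flags (e.g. the reply
 * bit), not route data, hence the 22-bit mask. */
static uint64_t cfg_get_route(uint32_t route_hi, uint32_t route_lo)
{
	return ((uint64_t)(route_hi & 0x3fffff) << 32) | route_lo;
}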
+/* additional header for read/write packets */
+struct tb_cfg_address {
+ u32 offset:13; /* in dwords */
+ u32 length:6; /* in dwords */
+ u32 port:6;
+ enum tb_cfg_space space:2;
+ u32 seq:2; /* sequence number */
+ u32 zero:3;
+} __packed;
+
+/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
+struct cfg_read_pkg {
+ struct tb_cfg_header header;
+ struct tb_cfg_address addr;
+} __packed;
+
+/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
+struct cfg_write_pkg {
+ struct tb_cfg_header header;
+ struct tb_cfg_address addr;
+ u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
+} __packed;
+
+/* TB_CFG_PKG_ERROR */
+struct cfg_error_pkg {
+ struct tb_cfg_header header;
+ enum tb_cfg_error error:4;
+ u32 zero1:4;
+ u32 port:6;
+ u32 zero2:2; /* Both should be zero, still they are different fields. */
+ u32 zero3:16;
+} __packed;
+
+/* TB_CFG_PKG_EVENT */
+struct cfg_event_pkg {
+ struct tb_cfg_header header;
+ u32 port:6;
+ u32 zero:25;
+ bool unplug:1;
+} __packed;
+
+/* TB_CFG_PKG_RESET */
+struct cfg_reset_pkg {
+ struct tb_cfg_header header;
+} __packed;
+
+/* TB_CFG_PKG_PREPARE_TO_SLEEP */
+struct cfg_pts_pkg {
+ struct tb_cfg_header header;
+ u32 data;
+} __packed;
+
+/* ICM messages */
+
+enum icm_pkg_code {
+ ICM_GET_TOPOLOGY = 0x1,
+ ICM_DRIVER_READY = 0x3,
+ ICM_APPROVE_DEVICE = 0x4,
+ ICM_CHALLENGE_DEVICE = 0x5,
+ ICM_ADD_DEVICE_KEY = 0x6,
+ ICM_GET_ROUTE = 0xa,
+};
+
+enum icm_event_code {
+ ICM_EVENT_DEVICE_CONNECTED = 3,
+ ICM_EVENT_DEVICE_DISCONNECTED = 4,
+};
+
+struct icm_pkg_header {
+ u8 code;
+ u8 flags;
+ u8 packet_id;
+ u8 total_packets;
+} __packed;
+
+#define ICM_FLAGS_ERROR BIT(0)
+#define ICM_FLAGS_NO_KEY BIT(1)
+#define ICM_FLAGS_SLEVEL_SHIFT 3
+#define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3)
+
+struct icm_pkg_driver_ready {
+ struct icm_pkg_header hdr;
+} __packed;
+
+struct icm_pkg_driver_ready_response {
+ struct icm_pkg_header hdr;
+ u8 romver;
+ u8 ramver;
+ u16 security_level;
+} __packed;
+
+/* Falcon Ridge & Alpine Ridge common messages */
+
+struct icm_fr_pkg_get_topology {
+ struct icm_pkg_header hdr;
+} __packed;
+
+#define ICM_GET_TOPOLOGY_PACKETS 14
+
+struct icm_fr_pkg_get_topology_response {
+ struct icm_pkg_header hdr;
+ u32 route_lo;
+ u32 route_hi;
+ u8 first_data;
+ u8 second_data;
+ u8 drom_i2c_address_index;
+ u8 switch_index;
+ u32 reserved[2];
+ u32 ports[16];
+ u32 port_hop_info[16];
+} __packed;
+
+#define ICM_SWITCH_USED BIT(0)
+#define ICM_SWITCH_UPSTREAM_PORT_MASK GENMASK(7, 1)
+#define ICM_SWITCH_UPSTREAM_PORT_SHIFT 1
+
+#define ICM_PORT_TYPE_MASK GENMASK(23, 0)
+#define ICM_PORT_INDEX_SHIFT 24
+#define ICM_PORT_INDEX_MASK GENMASK(31, 24)
+
+struct icm_fr_event_device_connected {
+ struct icm_pkg_header hdr;
+ uuid_be ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 link_info;
+ u32 ep_name[55];
+} __packed;
+
+#define ICM_LINK_INFO_LINK_MASK 0x7
+#define ICM_LINK_INFO_DEPTH_SHIFT 4
+#define ICM_LINK_INFO_DEPTH_MASK GENMASK(7, 4)
+#define ICM_LINK_INFO_APPROVED BIT(8)
+
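A hedged decode helper for link_info, with the kernel macros expanded to plain C (field semantics inferred from the masks above; not part of the patch):

#include <stdbool.h>
#include <stdint.h>

#define ICM_LINK_INFO_LINK_MASK		0x7
#define ICM_LINK_INFO_DEPTH_SHIFT	4
#define ICM_LINK_INFO_DEPTH_MASK	(0xf << 4)	/* GENMASK(7, 4) */
#define ICM_LINK_INFO_APPROVED		(1 << 8)	/* BIT(8) */

static void icm_decode_link_info(uint16_t link_info, uint8_t *link,
				 uint8_t *depth, bool *approved)
{
	*link = link_info & ICM_LINK_INFO_LINK_MASK;
	*depth = (link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		 ICM_LINK_INFO_DEPTH_SHIFT;
	*approved = link_info & ICM_LINK_INFO_APPROVED;
}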
+struct icm_fr_pkg_approve_device {
+ struct icm_pkg_header hdr;
+ uuid_be ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+} __packed;
+
+struct icm_fr_event_device_disconnected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+} __packed;
+
+struct icm_fr_pkg_add_device_key {
+ struct icm_pkg_header hdr;
+ uuid_be ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+ u32 key[8];
+} __packed;
+
+struct icm_fr_pkg_add_device_key_response {
+ struct icm_pkg_header hdr;
+ uuid_be ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+} __packed;
+
+struct icm_fr_pkg_challenge_device {
+ struct icm_pkg_header hdr;
+ uuid_be ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+ u32 challenge[8];
+} __packed;
+
+struct icm_fr_pkg_challenge_device_response {
+ struct icm_pkg_header hdr;
+ uuid_be ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+ u32 challenge[8];
+ u32 response[8];
+} __packed;
+
+/* Alpine Ridge only messages */
+
+struct icm_ar_pkg_get_route {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+} __packed;
+
+struct icm_ar_pkg_get_route_response {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ u32 route_hi;
+ u32 route_lo;
+} __packed;
+
+#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 1e2a4a8046be..582bd1f156dc 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -23,15 +23,22 @@
*/
#define TB_MAX_CONFIG_RW_LENGTH 60
-enum tb_cap {
- TB_CAP_PHY = 0x0001,
- TB_CAP_TIME1 = 0x0003,
- TB_CAP_PCIE = 0x0004,
- TB_CAP_I2C = 0x0005,
- TB_CAP_PLUG_EVENTS = 0x0105, /* also EEPROM */
- TB_CAP_TIME2 = 0x0305,
- TB_CAP_IECS = 0x0405,
- TB_CAP_LINK_CONTROLLER = 0x0605, /* also IECS */
+enum tb_switch_cap {
+ TB_SWITCH_CAP_VSE = 0x05,
+};
+
+enum tb_switch_vse_cap {
+ TB_VSE_CAP_PLUG_EVENTS = 0x01, /* also EEPROM */
+ TB_VSE_CAP_TIME2 = 0x03,
+ TB_VSE_CAP_IECS = 0x04,
+ TB_VSE_CAP_LINK_CONTROLLER = 0x06, /* also IECS */
+};
+
+enum tb_port_cap {
+ TB_PORT_CAP_PHY = 0x01,
+ TB_PORT_CAP_TIME1 = 0x03,
+ TB_PORT_CAP_ADAP = 0x04,
+ TB_PORT_CAP_VSE = 0x05,
};
enum tb_port_state {
@@ -49,15 +56,34 @@ struct tb_cap_basic {
u8 cap; /* if cap == 0x05 then we have an extended capability */
} __packed;
+/**
+ * struct tb_cap_extended_short - Switch extended short capability
+ * @next: Pointer to the next capability. If @next and @length are zero
+ * then we have a long cap.
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @length: Length of this capability
+ */
struct tb_cap_extended_short {
- u8 next; /* if next and length are zero then we have a long cap */
- enum tb_cap cap:16;
+ u8 next;
+ u8 cap;
+ u8 vsec_id;
u8 length;
} __packed;
+/**
+ * struct tb_cap_extended_long - Switch extended long capability
+ * @zero1: This field should be zero
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @zero2: This field should be zero
+ * @next: Pointer to the next capability
+ * @length: Length of this capability
+ */
struct tb_cap_extended_long {
u8 zero1;
- enum tb_cap cap:16;
+ u8 cap;
+ u8 vsec_id;
u8 zero2;
u16 next;
u16 length;
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
index baf1cd370446..ca4475907d7a 100644
--- a/drivers/thunderbolt/tunnel_pci.c
+++ b/drivers/thunderbolt/tunnel_pci.c
@@ -147,10 +147,10 @@ bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
static int tb_pci_port_active(struct tb_port *port, bool active)
{
u32 word = active ? 0x80000000 : 0x0;
- int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE);
- if (cap <= 0) {
- tb_port_warn(port, "TB_CAP_PCIE not found: %d\n", cap);
- return cap ? cap : -ENXIO;
+ int cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
+ if (cap < 0) {
+ tb_port_warn(port, "TB_PORT_CAP_ADAP not found: %d\n", cap);
+ return cap;
}
return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
}
@@ -194,19 +194,13 @@ err:
*/
int tb_pci_activate(struct tb_pci_tunnel *tunnel)
{
- int res;
if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
tb_tunnel_WARN(tunnel,
"trying to activate an already activated tunnel\n");
return -EINVAL;
}
- res = tb_pci_restart(tunnel);
- if (res)
- return res;
-
- list_add(&tunnel->list, &tunnel->tb->tunnel_list);
- return 0;
+ return tb_pci_restart(tunnel);
}
@@ -227,6 +221,5 @@ void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
tb_path_deactivate(tunnel->path_to_down);
if (tunnel->path_to_up->activated)
tb_path_deactivate(tunnel->path_to_up);
- list_del_init(&tunnel->list);
}
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index d0b508b68f3c..a56fdf972dbe 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -66,14 +66,7 @@ static int probe(struct pci_dev *pdev,
return err;
}
- if (!pdev->irq) {
- dev_warn(&pdev->dev, "No IRQ assigned to device: "
- "no support for interrupts?\n");
- pci_disable_device(pdev);
- return -ENODEV;
- }
-
- if (!pci_intx_mask_supported(pdev)) {
+ if (pdev->irq && !pci_intx_mask_supported(pdev)) {
err = -ENODEV;
goto err_verify;
}
@@ -86,10 +79,15 @@ static int probe(struct pci_dev *pdev,
gdev->info.name = "uio_pci_generic";
gdev->info.version = DRIVER_VERSION;
- gdev->info.irq = pdev->irq;
- gdev->info.irq_flags = IRQF_SHARED;
- gdev->info.handler = irqhandler;
gdev->pdev = pdev;
+ if (pdev->irq) {
+ gdev->info.irq = pdev->irq;
+ gdev->info.irq_flags = IRQF_SHARED;
+ gdev->info.handler = irqhandler;
+ } else {
+ dev_warn(&pdev->dev, "No IRQ assigned to device: "
+ "no support for interrupts?\n");
+ }
err = uio_register_device(&pdev->dev, &gdev->info);
if (err)
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 3acef3c5d8ed..3f63e03de8e8 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -706,7 +706,7 @@ static const struct file_operations vhost_vsock_fops = {
};
static struct miscdevice vhost_vsock_misc = {
- .minor = MISC_DYNAMIC_MINOR,
+ .minor = VHOST_VSOCK_MINOR,
.name = "vhost-vsock",
.fops = &vhost_vsock_fops,
};
@@ -778,3 +778,5 @@ module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock ");
+MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
+MODULE_ALIAS("devname:vhost-vsock");
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index e0b8a4bc73df..fd2e9da27c4b 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -25,8 +25,7 @@
#include <asm/io.h>
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
#define DS1WM_CMD 0x00 /* R/W 4 bits command */
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 2e30db1b1a43..d49681cd29af 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -20,8 +20,7 @@
#include <linux/delay.h>
#include <asm/delay.h>
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
/**
* Allow the active pullup to be disabled, default is enabled.
@@ -35,6 +34,8 @@
*/
static int ds2482_active_pullup = 1;
module_param_named(active_pullup, ds2482_active_pullup, int, 0644);
+MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
+ "0-disable, 1-enable (default)");
/**
* The DS2482 registers - there are 3 registers that are addressed by a read
@@ -93,30 +94,6 @@ static const u8 ds2482_chan_rd[8] =
#define DS2482_REG_STS_PPD 0x02
#define DS2482_REG_STS_1WB 0x01
-
-static int ds2482_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int ds2482_remove(struct i2c_client *client);
-
-
-/**
- * Driver data (common to all clients)
- */
-static const struct i2c_device_id ds2482_id[] = {
- { "ds2482", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ds2482_id);
-
-static struct i2c_driver ds2482_driver = {
- .driver = {
- .name = "ds2482",
- },
- .probe = ds2482_probe,
- .remove = ds2482_remove,
- .id_table = ds2482_id,
-};
-
/*
* Client data (each client gets its own)
*/
@@ -560,10 +537,25 @@ static int ds2482_remove(struct i2c_client *client)
return 0;
}
+/**
+ * Driver data (common to all clients)
+ */
+static const struct i2c_device_id ds2482_id[] = {
+ { "ds2482", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ds2482_id);
+
+static struct i2c_driver ds2482_driver = {
+ .driver = {
+ .name = "ds2482",
+ },
+ .probe = ds2482_probe,
+ .remove = ds2482_remove,
+ .id_table = ds2482_id,
+};
module_i2c_driver(ds2482_driver);
-MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
- "0-disable, 1-enable (default)");
MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
MODULE_DESCRIPTION("DS2482 driver");
MODULE_LICENSE("GPL");
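
This reshuffle is the shape most of the w1 patches below repeat: callbacks first, then the id table and the driver struct, so the forward declarations can be dropped. Reduced to a skeleton (all names hypothetical):

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe = foo_probe,
	.remove = foo_remove,
	.id_table = foo_id,
};
module_i2c_driver(foo_driver);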
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index be77b7914fad..46ccb2fc4f60 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -25,8 +25,7 @@
#include <linux/usb.h>
#include <linux/slab.h>
-#include "../w1_int.h"
-#include "../w1.h"
+#include <linux/w1.h>
/* USB Standard */
/* USB Control request vendor type */
@@ -179,28 +178,9 @@ struct ds_status
u8 reserved2;
};
-static struct usb_device_id ds_id_table [] = {
- { USB_DEVICE(0x04fa, 0x2490) },
- { },
-};
-MODULE_DEVICE_TABLE(usb, ds_id_table);
-
-static int ds_probe(struct usb_interface *, const struct usb_device_id *);
-static void ds_disconnect(struct usb_interface *);
-
-static int ds_send_control(struct ds_device *, u16, u16);
-static int ds_send_control_cmd(struct ds_device *, u16, u16);
-
static LIST_HEAD(ds_devices);
static DEFINE_MUTEX(ds_mutex);
-static struct usb_driver ds_driver = {
- .name = "DS9490R",
- .probe = ds_probe,
- .disconnect = ds_disconnect,
- .id_table = ds_id_table,
-};
-
static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index)
{
int err;
@@ -1108,8 +1088,20 @@ static void ds_disconnect(struct usb_interface *intf)
kfree(dev);
}
+static struct usb_device_id ds_id_table [] = {
+ { USB_DEVICE(0x04fa, 0x2490) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, ds_id_table);
+
+static struct usb_driver ds_driver = {
+ .name = "DS9490R",
+ .probe = ds_probe,
+ .disconnect = ds_disconnect,
+ .id_table = ds_id_table,
+};
module_usb_driver(ds_driver);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 97a676bf5989..d83d7c99d81d 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -34,28 +34,7 @@
#include <linux/pci_ids.h>
#include <linux/pci.h>
-#include "../w1.h"
-#include "../w1_int.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire protocol) over VGA DDC(matrox gpio).");
-
-static struct pci_device_id matrox_w1_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
- { },
-};
-MODULE_DEVICE_TABLE(pci, matrox_w1_tbl);
-
-static int matrox_w1_probe(struct pci_dev *, const struct pci_device_id *);
-static void matrox_w1_remove(struct pci_dev *);
-
-static struct pci_driver matrox_w1_pci_driver = {
- .name = "matrox_w1",
- .id_table = matrox_w1_tbl,
- .probe = matrox_w1_probe,
- .remove = matrox_w1_remove,
-};
+#include <linux/w1.h>
/*
* Matrox G400 DDC registers.
@@ -88,9 +67,6 @@ struct matrox_device
struct w1_bus_master *bus_master;
};
-static u8 matrox_w1_read_ddc_bit(void *);
-static void matrox_w1_write_ddc_bit(void *, u8);
-
/*
* These functions read and write DDC Data bit.
*
@@ -226,4 +202,21 @@ static void matrox_w1_remove(struct pci_dev *pdev)
}
kfree(dev);
}
+
+static struct pci_device_id matrox_w1_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, matrox_w1_tbl);
+
+static struct pci_driver matrox_w1_pci_driver = {
+ .name = "matrox_w1",
+ .id_table = matrox_w1_tbl,
+ .probe = matrox_w1_probe,
+ .remove = matrox_w1_remove,
+};
module_pci_driver(matrox_w1_pci_driver);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire protocol) over VGA DDC(matrox gpio).");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a4621757a47f..74f2e6e6202a 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -19,8 +19,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
/*
* MXC W1 Register offsets
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index fb190c259607..3612542b6044 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -19,8 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
#define MOD_NAME "OMAP_HDQ:"
@@ -53,7 +52,10 @@
#define OMAP_HDQ_MAX_USER 4
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
+
static int w1_id;
+module_param(w1_id, int, S_IRUSR);
+MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
struct hdq_data {
struct device *dev;
@@ -76,36 +78,6 @@ struct hdq_data {
};
-static int omap_hdq_probe(struct platform_device *pdev);
-static int omap_hdq_remove(struct platform_device *pdev);
-
-static const struct of_device_id omap_hdq_dt_ids[] = {
- { .compatible = "ti,omap3-1w" },
- { .compatible = "ti,am4372-hdq" },
- {}
-};
-MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
-
-static struct platform_driver omap_hdq_driver = {
- .probe = omap_hdq_probe,
- .remove = omap_hdq_remove,
- .driver = {
- .name = "omap_hdq",
- .of_match_table = omap_hdq_dt_ids,
- },
-};
-
-static u8 omap_w1_read_byte(void *_hdq);
-static void omap_w1_write_byte(void *_hdq, u8 byte);
-static u8 omap_w1_reset_bus(void *_hdq);
-
-
-static struct w1_bus_master omap_w1_master = {
- .read_byte = omap_w1_read_byte,
- .write_byte = omap_w1_write_byte,
- .reset_bus = omap_w1_reset_bus,
-};
-
/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
@@ -678,6 +650,12 @@ static void omap_w1_write_byte(void *_hdq, u8 byte)
}
}
+static struct w1_bus_master omap_w1_master = {
+ .read_byte = omap_w1_read_byte,
+ .write_byte = omap_w1_write_byte,
+ .reset_bus = omap_w1_reset_bus,
+};
+
static int omap_hdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -787,10 +765,22 @@ static int omap_hdq_remove(struct platform_device *pdev)
return 0;
}
-module_platform_driver(omap_hdq_driver);
+static const struct of_device_id omap_hdq_dt_ids[] = {
+ { .compatible = "ti,omap3-1w" },
+ { .compatible = "ti,am4372-hdq" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
-module_param(w1_id, int, S_IRUSR);
-MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
+static struct platform_driver omap_hdq_driver = {
+ .probe = omap_hdq_probe,
+ .remove = omap_hdq_remove,
+ .driver = {
+ .name = "omap_hdq",
+ .of_match_table = omap_hdq_dt_ids,
+ },
+};
+module_platform_driver(omap_hdq_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a373ae69d9f6..a90728ceec5a 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -20,8 +20,7 @@
#include <linux/of.h>
#include <linux/delay.h>
-#include "../w1.h"
-#include "../w1_int.h"
+#include <linux/w1.h>
static u8 w1_gpio_set_pullup(void *data, int delay)
{
diff --git a/drivers/w1/slaves/w1_bq27000.c b/drivers/w1/slaves/w1_bq27000.c
index 9f4a86b754ba..8046ac45381a 100644
--- a/drivers/w1/slaves/w1_bq27000.c
+++ b/drivers/w1/slaves/w1_bq27000.c
@@ -17,14 +17,16 @@
#include <linux/mutex.h>
#include <linux/power/bq27xxx_battery.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_FAMILY_BQ27000 0x01
#define HDQ_CMD_READ (0)
#define HDQ_CMD_WRITE (1<<7)
static int F_ID;
+module_param(F_ID, int, S_IRUSR);
+MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device");
static int w1_bq27000_read(struct device *dev, unsigned int reg)
{
@@ -106,13 +108,10 @@ static void __exit w1_bq27000_exit(void)
w1_unregister_family(&w1_bq27000_family);
}
-
module_init(w1_bq27000_init);
module_exit(w1_bq27000_exit);
-module_param(F_ID, int, S_IRUSR);
-MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000));
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Ltd");
MODULE_DESCRIPTION("HDQ/1-wire slave driver bq27000 battery monitor chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000));
diff --git a/drivers/w1/slaves/w1_ds2405.c b/drivers/w1/slaves/w1_ds2405.c
index d5d54876cb64..42a1e81060ce 100644
--- a/drivers/w1/slaves/w1_ds2405.c
+++ b/drivers/w1/slaves/w1_ds2405.c
@@ -24,8 +24,9 @@
#include <linux/string.h>
#include <linux/types.h>
-#include "../w1.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_FAMILY_DS2405 0x05
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index 51f2f66d6555..fac266366ca3 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -17,13 +17,9 @@
#include <linux/slab.h>
#include <linux/crc16.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
-MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
+#define W1_FAMILY_DS2406 0x12
#define W1_F12_FUNC_READ_STATUS 0xAA
#define W1_F12_FUNC_WRITE_STATUS 0x55
@@ -154,3 +150,7 @@ static struct w1_family w1_family_12 = {
.fops = &w1_f12_fops,
};
module_w1_family(w1_family_12);
+
+MODULE_AUTHOR("Scott Alfter <scott@alfter.us>");
+MODULE_DESCRIPTION("w1 family 12 driver for DS2406 2 Pin IO");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index aec5958e66e9..b535d5ec35b6 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -15,15 +15,9 @@
#include <linux/delay.h>
#include <linux/slab.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
-MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2408));
+#include <linux/w1.h>
+#define W1_FAMILY_DS2408 0x29
#define W1_F29_RETRIES 3
@@ -352,3 +346,8 @@ static struct w1_family w1_family_29 = {
.fops = &w1_f29_fops,
};
module_w1_family(w1_family_29);
+
+MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
+MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2408));
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index f2e1c51533b9..492e3d010321 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -16,14 +16,9 @@
#include <linux/delay.h>
#include <linux/slab.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
-MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2413));
+#define W1_FAMILY_DS2413 0x3A
#define W1_F3A_RETRIES 3
#define W1_F3A_FUNC_PIO_ACCESS_READ 0xF5
@@ -136,3 +131,8 @@ static struct w1_family w1_family_3a = {
.fops = &w1_f3a_fops,
};
module_w1_family(w1_family_3a);
+
+MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
+MODULE_DESCRIPTION("w1 family 3a driver for DS2413 2 Pin IO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2413));
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
index 4ab54fd9dde2..050407c53b16 100644
--- a/drivers/w1/slaves/w1_ds2423.c
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -30,9 +30,9 @@
#include <linux/delay.h>
#include <linux/crc16.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_COUNTER_DS2423 0x1D
#define CRC16_VALID 0xb001
#define CRC16_INIT 0
@@ -140,7 +140,7 @@ static struct w1_family w1_family_1d = {
};
module_w1_family(w1_family_1d);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram");
+MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_COUNTER_DS2423));
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 80572cb63ba8..5adecd3face1 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -16,9 +16,9 @@
#include <linux/types.h>
#include <linux/delay.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_EEPROM_DS2431 0x2D
#define W1_F2D_EEPROM_SIZE 128
#define W1_F2D_PAGE_COUNT 4
@@ -290,7 +290,7 @@ static struct w1_family w1_family_2d = {
};
module_w1_family(w1_family_2d);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>");
MODULE_DESCRIPTION("w1 family 2d driver for DS2431, 1kb EEPROM");
+MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2431));
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 6cf378c89ecb..75ad70cfe8e8 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -22,14 +22,9 @@
#endif
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
-MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");
-MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433));
+#define W1_EEPROM_DS2433 0x23
#define W1_EEPROM_SIZE 512
#define W1_PAGE_COUNT 16
@@ -306,3 +301,8 @@ static struct w1_family w1_family_23 = {
.fops = &w1_f23_fops,
};
module_w1_family(w1_family_23);
+
+MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
+MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433));
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index 5ededb4965e1..6487fb772a20 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -13,8 +13,9 @@
#include <linux/types.h>
#include <linux/delay.h>
-#include "../w1.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
+#define W1_FAMILY_DS2438 0x26
#define W1_DS2438_RETRIES 3
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index ffa37f773b3b..26168abfb8b8 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -18,11 +18,12 @@
#include <linux/idr.h>
#include <linux/gfp.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
#include "w1_ds2760.h"
+#define W1_FAMILY_DS2760 0x30
+
static int w1_ds2760_io(struct device *dev, char *buf, int addr, size_t count,
int io)
{
@@ -63,11 +64,13 @@ int w1_ds2760_read(struct device *dev, char *buf, int addr, size_t count)
{
return w1_ds2760_io(dev, buf, addr, count, 0);
}
+EXPORT_SYMBOL(w1_ds2760_read);
int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count)
{
return w1_ds2760_io(dev, buf, addr, count, 1);
}
+EXPORT_SYMBOL(w1_ds2760_write);
static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd)
{
@@ -91,11 +94,13 @@ int w1_ds2760_store_eeprom(struct device *dev, int addr)
{
return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA);
}
+EXPORT_SYMBOL(w1_ds2760_store_eeprom);
int w1_ds2760_recall_eeprom(struct device *dev, int addr)
{
return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
}
+EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
@@ -164,12 +169,7 @@ static struct w1_family w1_ds2760_family = {
};
module_w1_family(w1_ds2760_family);
-EXPORT_SYMBOL(w1_ds2760_read);
-EXPORT_SYMBOL(w1_ds2760_write);
-EXPORT_SYMBOL(w1_ds2760_store_eeprom);
-EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
-
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>");
MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");
+MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2760));
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index f5c2aa429a92..a60528131154 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -21,11 +21,12 @@
#include <linux/mutex.h>
#include <linux/idr.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
#include "w1_ds2780.h"
+#define W1_FAMILY_DS2780 0x32
+
static int w1_ds2780_do_io(struct device *dev, char *buf, int addr,
size_t count, int io)
{
@@ -156,7 +157,7 @@ static struct w1_family w1_ds2780_family = {
};
module_w1_family(w1_ds2780_family);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC");
+MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2780));
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index 9c03e014cf9e..645be6e0b24a 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -18,11 +18,12 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
+
#include "w1_ds2781.h"
+#define W1_FAMILY_DS2781 0x3D
+
static int w1_ds2781_do_io(struct device *dev, char *buf, int addr,
size_t count, int io)
{
@@ -153,7 +154,7 @@ static struct w1_family w1_ds2781_family = {
};
module_w1_family(w1_ds2781_family);
-MODULE_LICENSE("GPL");
MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");
MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC");
+MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2781));
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index 5e348d38ec5c..ec234b846eb3 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -20,14 +20,9 @@
#define CRC16_INIT 0
#define CRC16_VALID 0xb001
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Markus Franke <franke.m@sebakmt.com>, <franm@hrz.tu-chemnitz.de>");
-MODULE_DESCRIPTION("w1 family 1C driver for DS28E04, 4kb EEPROM and PIO");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E04));
+#define W1_FAMILY_DS28E04 0x1C
/* Allow the strong pullup to be disabled, but default to enabled.
* If it was disabled a parasite powered device might not get the required
@@ -428,3 +423,8 @@ static struct w1_family w1_family_1C = {
.fops = &w1_f1C_fops,
};
module_w1_family(w1_family_1C);
+
+MODULE_AUTHOR("Markus Franke <franke.m@sebakmt.com>, <franm@hrz.tu-chemnitz.de>");
+MODULE_DESCRIPTION("w1 family 1C driver for DS28E04, 4kb EEPROM and PIO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E04));
diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c
index ed4c87506def..e556b0caff71 100644
--- a/drivers/w1/slaves/w1_smem.c
+++ b/drivers/w1/slaves/w1_smem.c
@@ -27,15 +27,10 @@
#include <linux/device.h>
#include <linux/types.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_01));
-MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_81));
+#define W1_FAMILY_SMEM_01 0x01
+#define W1_FAMILY_SMEM_81 0x81
static struct w1_family w1_smem_family_01 = {
.fid = W1_FAMILY_SMEM_01,
@@ -70,3 +65,9 @@ static void __exit w1_smem_fini(void)
module_init(w1_smem_init);
module_exit(w1_smem_fini);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_01));
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_SMEM_81));
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 82611f197b0a..cb3fc3c6b0d1 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -30,18 +30,13 @@
#include <linux/slab.h>
#include <linux/delay.h>
-#include "../w1.h"
-#include "../w1_int.h"
-#include "../w1_family.h"
+#include <linux/w1.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18S20));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1822));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18B20));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1825));
-MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
+#define W1_THERM_DS18S20 0x10
+#define W1_THERM_DS1822 0x22
+#define W1_THERM_DS18B20 0x28
+#define W1_THERM_DS1825 0x3B
+#define W1_THERM_DS28EA00 0x42
/* Allow the strong pullup to be disabled, but default to enabled.
 * If it was disabled, a parasite powered device might not get the required
@@ -646,3 +641,12 @@ static void __exit w1_therm_fini(void)
module_init(w1_therm_init);
module_exit(w1_therm_fini);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18S20));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1822));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS18B20));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS1825));
+MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 8511d1685db9..95ea7e6b1d99 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -28,25 +28,20 @@
#include <linux/atomic.h>
-#include "w1.h"
-#include "w1_int.h"
-#include "w1_family.h"
+#include "w1_internal.h"
#include "w1_netlink.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
+#define W1_FAMILY_DEFAULT 0
static int w1_timeout = 10;
-static int w1_timeout_us = 0;
-int w1_max_slave_count = 64;
-int w1_max_slave_ttl = 10;
-
module_param_named(timeout, w1_timeout, int, 0);
MODULE_PARM_DESC(timeout, "time in seconds between automatic slave searches");
+
+static int w1_timeout_us = 0;
module_param_named(timeout_us, w1_timeout_us, int, 0);
MODULE_PARM_DESC(timeout_us,
"time in microseconds between automatic slave searches");
+
/* A search stops when w1_max_slave_count devices have been found in that
* search. The next search will start over and detect the same set of devices
* on a static 1-wire bus. Memory is not allocated based on this number, just
@@ -55,9 +50,12 @@ MODULE_PARM_DESC(timeout_us,
* device on the network and w1_max_slave_count is set to 1, the device id can
* be read directly skipping the normal slower search process.
*/
+int w1_max_slave_count = 64;
module_param_named(max_slave_count, w1_max_slave_count, int, 0);
MODULE_PARM_DESC(max_slave_count,
"maximum number of slaves detected in a search");
+
+int w1_max_slave_ttl = 10;
module_param_named(slave_ttl, w1_max_slave_ttl, int, 0);
MODULE_PARM_DESC(slave_ttl,
"Number of searches not seeing a slave before it will be removed");
@@ -1228,3 +1226,7 @@ static void __exit w1_fini(void)
module_init(w1_init);
module_exit(w1_fini);
+
+MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
+MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c
index 2096f460498f..f14ab0b340b5 100644
--- a/drivers/w1/w1_family.c
+++ b/drivers/w1/w1_family.c
@@ -18,8 +18,7 @@
#include <linux/delay.h>
#include <linux/export.h>
-#include "w1_family.h"
-#include "w1.h"
+#include "w1_internal.h"
DEFINE_SPINLOCK(w1_flock);
static LIST_HEAD(w1_families);
@@ -55,6 +54,7 @@ int w1_register_family(struct w1_family *newf)
return ret;
}
+EXPORT_SYMBOL(w1_register_family);
/**
* w1_unregister_family() - unregister a device family driver
@@ -87,6 +87,7 @@ void w1_unregister_family(struct w1_family *fent)
flush_signals(current);
}
}
+EXPORT_SYMBOL(w1_unregister_family);
/*
* Should be called under w1_flock held.
@@ -136,6 +137,3 @@ void __w1_family_get(struct w1_family *f)
atomic_inc(&f->refcnt);
smp_mb__after_atomic();
}
-
-EXPORT_SYMBOL(w1_unregister_family);
-EXPORT_SYMBOL(w1_register_family);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
deleted file mode 100644
index 869a3ff87d29..000000000000
--- a/drivers/w1/w1_family.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __W1_FAMILY_H
-#define __W1_FAMILY_H
-
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/atomic.h>
-
-#define W1_FAMILY_DEFAULT 0
-#define W1_FAMILY_BQ27000 0x01
-#define W1_FAMILY_SMEM_01 0x01
-#define W1_FAMILY_SMEM_81 0x81
-#define W1_FAMILY_DS2405 0x05
-#define W1_THERM_DS18S20 0x10
-#define W1_FAMILY_DS28E04 0x1C
-#define W1_COUNTER_DS2423 0x1D
-#define W1_THERM_DS1822 0x22
-#define W1_EEPROM_DS2433 0x23
-#define W1_FAMILY_DS2438 0x26
-#define W1_THERM_DS18B20 0x28
-#define W1_FAMILY_DS2408 0x29
-#define W1_EEPROM_DS2431 0x2D
-#define W1_FAMILY_DS2760 0x30
-#define W1_FAMILY_DS2780 0x32
-#define W1_FAMILY_DS2413 0x3A
-#define W1_FAMILY_DS2406 0x12
-#define W1_THERM_DS1825 0x3B
-#define W1_FAMILY_DS2781 0x3D
-#define W1_THERM_DS28EA00 0x42
-
-#define MAXNAMELEN 32
-
-struct w1_slave;
-
-/**
- * struct w1_family_ops - operations for a family type
- * @add_slave: add_slave
- * @remove_slave: remove_slave
- * @groups: sysfs group
- */
-struct w1_family_ops
-{
- int (* add_slave)(struct w1_slave *);
- void (* remove_slave)(struct w1_slave *);
- const struct attribute_group **groups;
-};
-
-/**
- * struct w1_family - reference counted family structure.
- * @family_entry: family linked list
- * @fid: 8 bit family identifier
- * @fops: operations for this family
- * @refcnt: reference counter
- */
-struct w1_family
-{
- struct list_head family_entry;
- u8 fid;
-
- struct w1_family_ops *fops;
-
- atomic_t refcnt;
-};
-
-extern spinlock_t w1_flock;
-
-void w1_family_put(struct w1_family *);
-void __w1_family_get(struct w1_family *);
-struct w1_family * w1_family_registered(u8);
-void w1_unregister_family(struct w1_family *);
-int w1_register_family(struct w1_family *);
-
-/**
- * module_w1_driver() - Helper macro for registering a 1-Wire families
- * @__w1_family: w1_family struct
- *
- * Helper macro for 1-Wire families which do not do anything special in module
- * init/exit. This eliminates a lot of boilerplate. Each module may only
- * use this macro once, and calling it replaces module_init() and module_exit()
- */
-#define module_w1_family(__w1_family) \
- module_driver(__w1_family, w1_register_family, \
- w1_unregister_family)
-
-#endif /* __W1_FAMILY_H */
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 1072a2e620bb..1c776178f598 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -21,9 +21,8 @@
#include <linux/export.h>
#include <linux/moduleparam.h>
-#include "w1.h"
+#include "w1_internal.h"
#include "w1_netlink.h"
-#include "w1_int.h"
static int w1_search_count = -1; /* Default is continual scan */
module_param_named(search_count, w1_search_count, int, 0);
@@ -179,6 +178,7 @@ err_out_free_dev:
return retval;
}
+EXPORT_SYMBOL(w1_add_master_device);
void __w1_remove_master_device(struct w1_master *dev)
{
@@ -251,6 +251,4 @@ void w1_remove_master_device(struct w1_bus_master *bm)
__w1_remove_master_device(found);
}
-
-EXPORT_SYMBOL(w1_add_master_device);
EXPORT_SYMBOL(w1_remove_master_device);
diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h
deleted file mode 100644
index 371989159216..000000000000
--- a/drivers/w1/w1_int.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __W1_INT_H
-#define __W1_INT_H
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-
-#include "w1.h"
-
-int w1_add_master_device(struct w1_bus_master *);
-void w1_remove_master_device(struct w1_bus_master *);
-void __w1_remove_master_device(struct w1_master *);
-
-#endif /* __W1_INT_H */
diff --git a/drivers/w1/w1_internal.h b/drivers/w1/w1_internal.h
new file mode 100644
index 000000000000..1623e2fdccc7
--- /dev/null
+++ b/drivers/w1/w1_internal.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __W1_H
+#define __W1_H
+
+#include <linux/w1.h>
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+#define W1_SLAVE_ACTIVE 0
+#define W1_SLAVE_DETACH 1
+
+/**
+ * struct w1_async_cmd - execute callback from the w1_process kthread
+ * @async_entry: link entry
+ * @cb: callback function, must list_del and destroy this list before
+ * returning
+ *
+ * When inserted into the w1_master async_list, w1_process will execute
+ * the callback. Embed this into the structure with the command details.
+ */
+struct w1_async_cmd {
+ struct list_head async_entry;
+ void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
+};
+
+int w1_create_master_attributes(struct w1_master *master);
+void w1_destroy_master_attributes(struct w1_master *master);
+void w1_search(struct w1_master *dev, u8 search_type,
+ w1_slave_found_callback cb);
+void w1_search_devices(struct w1_master *dev, u8 search_type,
+ w1_slave_found_callback cb);
+/* call w1_unref_slave to release the reference counts w1_search_slave added */
+struct w1_slave *w1_search_slave(struct w1_reg_num *id);
+/*
+ * decrements the reference on sl->master and sl, and cleans up if zero
+ * returns the reference count after it has been decremented
+ */
+int w1_unref_slave(struct w1_slave *sl);
+void w1_slave_found(struct w1_master *dev, u64 rn);
+void w1_search_process_cb(struct w1_master *dev, u8 search_type,
+ w1_slave_found_callback cb);
+struct w1_slave *w1_slave_search_device(struct w1_master *dev,
+ struct w1_reg_num *rn);
+struct w1_master *w1_search_master_id(u32 id);
+
+/* Disconnect and reconnect devices in the given family. Used for finding
+ * unclaimed devices after a family has been registered or releasing devices
+ * after a family has been unregistered. Set attach to 1 when a new family
+ * has just been registered, to 0 when it has been unregistered.
+ */
+void w1_reconnect_slaves(struct w1_family *f, int attach);
+int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
+/* 0 success, otherwise EBUSY */
+int w1_slave_detach(struct w1_slave *sl);
+
+void __w1_remove_master_device(struct w1_master *dev);
+
+void w1_family_put(struct w1_family *f);
+void __w1_family_get(struct w1_family *f);
+struct w1_family *w1_family_registered(u8 fid);
+
+extern struct device_driver w1_master_driver;
+extern struct device w1_master_device;
+extern int w1_max_slave_count;
+extern int w1_max_slave_ttl;
+extern struct list_head w1_masters;
+extern struct mutex w1_mlock;
+extern spinlock_t w1_flock;
+
+int w1_process_callbacks(struct w1_master *dev);
+int w1_process(void *data);
+
+#endif /* __W1_H */
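
struct w1_async_cmd is meant to be embedded in a larger command structure; the callback recovers the wrapper with container_of() and, per the comment above, must unlink and free the entry itself. A sketch for code inside drivers/w1 (names hypothetical):

struct my_async_cmd {
	struct w1_async_cmd async;
	u64 rn;				/* command details live here */
};

static void my_async_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
	struct my_async_cmd *cmd =
		container_of(async_cmd, struct my_async_cmd, async);

	w1_slave_found(dev, cmd->rn);	/* whatever work the command does */
	list_del(&async_cmd->async_entry);
	kfree(cmd);
}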
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 1134e6b1eb02..d191e1f80579 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -18,7 +18,7 @@
#include <linux/moduleparam.h>
#include <linux/module.h>
-#include "w1.h"
+#include "w1_internal.h"
static int w1_delay_parm = 1;
module_param_named(delay_coef, w1_delay_parm, int, 0);
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 027950f997d1..f2f099caeb77 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -17,7 +17,7 @@
#include <linux/netlink.h>
#include <linux/connector.h>
-#include "w1.h"
+#include "w1_internal.h"
#include "w1_netlink.h"
#if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))
diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h
index b389e5ff5fa5..a36661cd1f05 100644
--- a/drivers/w1/w1_netlink.h
+++ b/drivers/w1/w1_netlink.h
@@ -18,7 +18,7 @@
#include <asm/types.h>
#include <linux/connector.h>
-#include "w1.h"
+#include "w1_internal.h"
/**
* enum w1_cn_msg_flags - bitfield flags for struct cn_msg.flags
diff --git a/include/dt-bindings/mux/mux.h b/include/dt-bindings/mux/mux.h
new file mode 100644
index 000000000000..c8e855c4a609
--- /dev/null
+++ b/include/dt-bindings/mux/mux.h
@@ -0,0 +1,16 @@
+/*
+ * This header provides constants for most Multiplexer bindings.
+ *
+ * Most Multiplexer bindings specify an idle state. In most cases, the
+ * multiplexer can be left as is when idle, and in some cases it can
+ * disconnect the input/output and leave the multiplexer in a high
+ * impedance state.
+ */
+
+#ifndef _DT_BINDINGS_MUX_MUX_H
+#define _DT_BINDINGS_MUX_MUX_H
+
+#define MUX_IDLE_AS_IS (-1)
+#define MUX_IDLE_DISCONNECT (-2)
+
+#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 035c16c9a505..d950dad5056a 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -263,11 +263,15 @@ static inline int coresight_timeout(void __iomem *addr, u32 offset,
#endif
#ifdef CONFIG_OF
-extern struct coresight_platform_data *of_get_coresight_platform_data(
- struct device *dev, struct device_node *node);
+extern int of_coresight_get_cpu(const struct device_node *node);
+extern struct coresight_platform_data *
+of_get_coresight_platform_data(struct device *dev,
+ const struct device_node *node);
#else
+static inline int of_coresight_get_cpu(const struct device_node *node)
+{ return 0; }
static inline struct coresight_platform_data *of_get_coresight_platform_data(
- struct device *dev, struct device_node *node) { return NULL; }
+ struct device *dev, const struct device_node *node) { return NULL; }
#endif
#ifdef CONFIG_PID_NS
diff --git a/include/linux/crc4.h b/include/linux/crc4.h
new file mode 100644
index 000000000000..8f739f1d794f
--- /dev/null
+++ b/include/linux/crc4.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_CRC4_H
+#define _LINUX_CRC4_H
+
+#include <linux/types.h>
+
+extern uint8_t crc4(uint8_t c, uint64_t x, int bits);
+
+#endif /* _LINUX_CRC4_H */
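
crc4() folds the low bits of x into the running 4-bit checksum c; the FSI code added in this series uses it to validate message CRCs. A usage sketch, relying on the usual CRC property that a message with its own checksum appended reduces to zero (assumed to hold for lib/crc4.c):

#include <linux/crc4.h>

static bool fsi_style_msg_ok(u64 msg_and_crc)
{
	/* checksum over the message including its trailing 4 CRC bits */
	return crc4(0, msg_and_crc, 64) == 0;
}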
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 273cbf6400ea..141fd38d061f 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -21,8 +21,18 @@ struct fsi_device {
struct device dev;
u8 engine_type;
u8 version;
+ u8 unit;
+ struct fsi_slave *slave;
+ uint32_t addr;
+ uint32_t size;
};
+extern int fsi_device_read(struct fsi_device *dev, uint32_t addr,
+ void *val, size_t size);
+extern int fsi_device_write(struct fsi_device *dev, uint32_t addr,
+ const void *val, size_t size);
+extern int fsi_device_peek(struct fsi_device *dev, void *val);
+
struct fsi_device_id {
u8 engine_type;
u8 version;
@@ -36,7 +46,6 @@ struct fsi_device_id {
#define FSI_DEVICE_VERSIONED(t, v) \
.engine_type = (t), .version = (v),
-
struct fsi_driver {
struct device_driver drv;
const struct fsi_device_id *id_table;
@@ -45,6 +54,30 @@ struct fsi_driver {
#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
+extern int fsi_driver_register(struct fsi_driver *fsi_drv);
+extern void fsi_driver_unregister(struct fsi_driver *fsi_drv);
+
+/* module_fsi_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_fsi_driver(__fsi_driver) \
+ module_driver(__fsi_driver, fsi_driver_register, \
+ fsi_driver_unregister)
+
+/* direct slave API */
+extern int fsi_slave_claim_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size);
+extern void fsi_slave_release_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size);
+extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
+ void *val, size_t size);
+extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
+ const void *val, size_t size);
+
+
+
extern struct bus_type fsi_bus_type;
#endif /* LINUX_FSI_H */
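
module_fsi_driver() gives FSI clients the same boilerplate reduction other buses have. A hypothetical minimal client matching on engine type (FSI_DEVICE() is assumed to be the id-table initializer from this header; 0x10 is a made-up engine type):

static int foo_probe(struct device *dev)
{
	struct fsi_device *fsi_dev = to_fsi_dev(dev);
	__be32 data;

	/* read one word from the start of the engine's address space */
	return fsi_device_read(fsi_dev, 0, &data, sizeof(data));
}

static const struct fsi_device_id foo_ids[] = {
	{ FSI_DEVICE(0x10) },
	{ 0 }
};

static struct fsi_driver foo_driver = {
	.id_table = foo_ids,
	.drv = {
		.name = "fsi-foo",
		.bus = &fsi_bus_type,
		.probe = foo_probe,
	},
};
module_fsi_driver(foo_driver);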
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 47eeec3218b5..5e347a9805fd 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -312,4 +312,41 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
int *processed, unsigned int scale);
+/**
+ * iio_get_channel_ext_info_count() - get number of ext_info attributes
+ * connected to the channel.
+ * @chan: The channel being queried
+ *
+ * Returns the number of ext_info attributes
+ */
+unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan);
+
+/**
+ * iio_read_channel_ext_info() - read ext_info attribute from a given channel
+ * @chan: The channel being queried.
+ * @attr: The ext_info attribute to read.
+ * @buf: Where to store the attribute value. Assumed to hold
+ * at least PAGE_SIZE bytes.
+ *
+ * Returns the number of bytes written to buf (perhaps w/o zero termination;
+ * it need not even be a string), or an error code.
+ */
+ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
+ const char *attr, char *buf);
+
+/**
+ * iio_write_channel_ext_info() - write ext_info attribute to a given channel
+ * @chan: The channel being queried.
+ * @attr: The ext_info attribute to write.
+ * @buf: The new attribute value. Strings need to be zero-
+ * terminated, but the terminator should not be included
+ * in the below len.
+ * @len: The size of the new attribute value.
+ *
+ * Returns the number of accepted bytes, which should be the same as len.
+ * An error code can also be returned.
+ */
+ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
+ const char *buf, size_t len);
+
#endif
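
A consumer-side sketch of the new accessors: forwarding a hypothetical "calibscale" ext_info attribute of a consumed channel, with buf sized to at least PAGE_SIZE per the contract above:

static ssize_t foo_show_calibscale(struct iio_channel *chan, char *buf)
{
	if (!iio_get_channel_ext_info_count(chan))
		return -EOPNOTSUPP;
	return iio_read_channel_ext_info(chan, "calibscale", buf);
}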
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 762b5fec3383..58751eae5f77 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -54,6 +54,7 @@
#define VHOST_NET_MINOR 238
#define UHID_MINOR 239
#define USERIO_MINOR 240
+#define VHOST_VSOCK_MINOR 241
#define MISC_DYNAMIC_MINOR 255
struct device;
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
new file mode 100644
index 000000000000..5577e1b773c4
--- /dev/null
+++ b/include/linux/mux/consumer.h
@@ -0,0 +1,32 @@
+/*
+ * mux/consumer.h - definitions for the multiplexer consumer interface
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MUX_CONSUMER_H
+#define _LINUX_MUX_CONSUMER_H
+
+struct device;
+struct mux_control;
+
+unsigned int mux_control_states(struct mux_control *mux);
+int __must_check mux_control_select(struct mux_control *mux,
+ unsigned int state);
+int __must_check mux_control_try_select(struct mux_control *mux,
+ unsigned int state);
+int mux_control_deselect(struct mux_control *mux);
+
+struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
+void mux_control_put(struct mux_control *mux);
+
+struct mux_control *devm_mux_control_get(struct device *dev,
+ const char *mux_name);
+
+#endif /* _LINUX_MUX_CONSUMER_H */
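
The consumer contract is that every successful select is paired with a deselect, since mux_control_select() takes the controller lock. A minimal sketch, assuming a mux named "adc" in the consumer's DT node (in a real driver the devm get would live in probe):

static int do_muxed_io(struct device *dev)
{
	struct mux_control *mux;
	int ret;

	mux = devm_mux_control_get(dev, "adc");
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	ret = mux_control_select(mux, 1);	/* may sleep until free */
	if (ret < 0)
		return ret;

	/* ... talk to whatever sits behind state 1 ... */

	return mux_control_deselect(mux);
}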
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
new file mode 100644
index 000000000000..35c3579c3304
--- /dev/null
+++ b/include/linux/mux/driver.h
@@ -0,0 +1,108 @@
+/*
+ * mux/driver.h - definitions for the multiplexer driver interface
+ *
+ * Copyright (C) 2017 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MUX_DRIVER_H
+#define _LINUX_MUX_DRIVER_H
+
+#include <dt-bindings/mux/mux.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+
+struct mux_chip;
+struct mux_control;
+
+/**
+ * struct mux_control_ops - Mux controller operations for a mux chip.
+ * @set: Set the state of the given mux controller.
+ */
+struct mux_control_ops {
+ int (*set)(struct mux_control *mux, int state);
+};
+
+/**
+ * struct mux_control - Represents a mux controller.
+ * @lock: Protects the mux controller state.
+ * @chip: The mux chip that is handling this mux controller.
+ * @cached_state: The current mux controller state, or -1 if none.
+ * @states: The number of mux controller states.
+ * @idle_state: The mux controller state to use when inactive, or one
+ * of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT.
+ *
+ * Mux drivers may only change @states and @idle_state, and may only do so
+ * between allocation and registration of the mux controller. Specifically,
+ * @cached_state is internal to the mux core and should never be written by
+ * mux drivers.
+ */
+struct mux_control {
+ struct semaphore lock; /* protects the state of the mux */
+
+ struct mux_chip *chip;
+ int cached_state;
+
+ unsigned int states;
+ int idle_state;
+};
+
+/**
+ * struct mux_chip - Represents a chip holding mux controllers.
+ * @controllers: Number of mux controllers handled by the chip.
+ * @mux: Array of mux controllers that are handled.
+ * @dev: Device structure.
+ * @id: Used to identify the device internally.
+ * @ops: Mux controller operations.
+ */
+struct mux_chip {
+ unsigned int controllers;
+ struct mux_control *mux;
+ struct device dev;
+ int id;
+
+ const struct mux_control_ops *ops;
+};
+
+#define to_mux_chip(x) container_of((x), struct mux_chip, dev)
+
+/**
+ * mux_chip_priv() - Get the extra memory reserved by mux_chip_alloc().
+ * @mux_chip: The mux-chip to get the private memory from.
+ *
+ * Return: Pointer to the private memory reserved by the allocator.
+ */
+static inline void *mux_chip_priv(struct mux_chip *mux_chip)
+{
+ return &mux_chip->mux[mux_chip->controllers];
+}
+
+struct mux_chip *mux_chip_alloc(struct device *dev,
+ unsigned int controllers, size_t sizeof_priv);
+int mux_chip_register(struct mux_chip *mux_chip);
+void mux_chip_unregister(struct mux_chip *mux_chip);
+void mux_chip_free(struct mux_chip *mux_chip);
+
+struct mux_chip *devm_mux_chip_alloc(struct device *dev,
+ unsigned int controllers,
+ size_t sizeof_priv);
+int devm_mux_chip_register(struct device *dev, struct mux_chip *mux_chip);
+
+/**
+ * mux_control_get_index() - Get the index of the given mux controller
+ * @mux: The mux-control to get the index for.
+ *
+ * Return: The index of the mux controller within the mux chip the mux
+ * controller is a part of.
+ */
+static inline unsigned int mux_control_get_index(struct mux_control *mux)
+{
+ return mux - mux->chip->mux;
+}
+
+#endif /* _LINUX_MUX_DRIVER_H */
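
On the driver side, mux_chip_alloc() reserves sizeof_priv bytes after the controller array, which mux_chip_priv() hands back. A hypothetical single-controller, two-state chip; the register layout, ioremap step and error convention are assumptions:

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mux/driver.h>
#include <linux/platform_device.h>

struct foo_mux {
	void __iomem *base;
};

static int foo_mux_set(struct mux_control *mux, int state)
{
	struct foo_mux *priv = mux_chip_priv(mux->chip);

	writel(state, priv->base);	/* made-up one-register mux */
	return 0;
}

static const struct mux_control_ops foo_mux_ops = {
	.set = foo_mux_set,
};

static int foo_mux_probe(struct platform_device *pdev)
{
	struct mux_chip *chip;

	chip = devm_mux_chip_alloc(&pdev->dev, 1, sizeof(struct foo_mux));
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->ops = &foo_mux_ops;
	chip->mux[0].states = 2;
	chip->mux[0].idle_state = MUX_IDLE_AS_IS;
	/* mapping of foo_mux->base omitted */

	return devm_mux_chip_register(&pdev->dev, chip);
}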
diff --git a/include/linux/i2c/apds990x.h b/include/linux/platform_data/apds990x.h
index d186fcc5d257..d186fcc5d257 100644
--- a/include/linux/i2c/apds990x.h
+++ b/include/linux/platform_data/apds990x.h
diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/platform_data/bh1770glc.h
index 8b5e2df36c72..8b5e2df36c72 100644
--- a/include/linux/i2c/bh1770glc.h
+++ b/include/linux/platform_data/bh1770glc.h
diff --git a/include/linux/sram.h b/include/linux/sram.h
index c97dcbe8ce25..4fb405fb0480 100644
--- a/include/linux/sram.h
+++ b/include/linux/sram.h
@@ -16,12 +16,12 @@
struct gen_pool;
#ifdef CONFIG_SRAM_EXEC
-int sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
+void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src, size_t size);
#else
-static inline int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
- size_t size)
+static inline void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
+ size_t size)
{
- return -ENODEV;
+ return NULL;
}
#endif /* CONFIG_SRAM_EXEC */
#endif /* __LINUX_SRAM_H__ */
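
The signature change means callers now get back the address to execute from (NULL on failure) instead of an errno. A sketch of the new calling convention; my_sram_stub stands for some position-independent code the caller wants copied:

extern int my_sram_stub(void);

static int run_from_sram(struct gen_pool *pool, void *dst, size_t size)
{
	int (*fn)(void);

	fn = (int (*)(void))sram_exec_copy(pool, dst, (void *)my_sram_stub,
					   size);
	if (!fn)
		return -ENOMEM;
	return fn();
}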
diff --git a/drivers/w1/w1.h b/include/linux/w1.h
index 758a7a6322e9..90cbe7e65059 100644
--- a/drivers/w1/w1.h
+++ b/include/linux/w1.h
@@ -12,8 +12,10 @@
* GNU General Public License for more details.
*/
-#ifndef __W1_H
-#define __W1_H
+#ifndef __LINUX_W1_H
+#define __LINUX_W1_H
+
+#include <linux/device.h>
/**
* struct w1_reg_num - broken out slave device id
@@ -22,8 +24,7 @@
* @id: along with family is the unique device id
* @crc: checksum of the other bytes
*/
-struct w1_reg_num
-{
+struct w1_reg_num {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 family:8,
id:48,
@@ -39,12 +40,6 @@ struct w1_reg_num
#ifdef __KERNEL__
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-
-#include "w1_family.h"
-
#define W1_MAXNAMELEN 32
#define W1_SEARCH 0xF0
@@ -59,9 +54,6 @@ struct w1_reg_num
#define W1_MATCH_ROM 0x55
#define W1_RESUME_CMD 0xA5
-#define W1_SLAVE_ACTIVE 0
-#define W1_SLAVE_DETACH 1
-
/**
* struct w1_slave - holds a single slave device on the bus
*
@@ -78,8 +70,7 @@ struct w1_reg_num
* @dev: kernel device identifier
*
*/
-struct w1_slave
-{
+struct w1_slave {
struct module *owner;
unsigned char name[W1_MAXNAMELEN];
struct list_head w1_slave_entry;
@@ -96,7 +87,6 @@ struct w1_slave
typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
-
/**
* struct w1_bus_master - operations available on a bus master
*
@@ -142,8 +132,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
* reset_bus.
*
*/
-struct w1_bus_master
-{
+struct w1_bus_master {
void *data;
u8 (*read_bit)(void *);
@@ -209,8 +198,7 @@ enum w1_master_flags {
* @bus_master: io operations available
* @seq: sequence number used for netlink broadcasts
*/
-struct w1_master
-{
+struct w1_master {
struct list_head w1_master_entry;
struct module *owner;
unsigned char name[W1_MAXNAMELEN];
@@ -254,45 +242,51 @@ struct w1_master
u32 seq;
};
+int w1_add_master_device(struct w1_bus_master *master);
+void w1_remove_master_device(struct w1_bus_master *master);
+
/**
- * struct w1_async_cmd - execute callback from the w1_process kthread
- * @async_entry: link entry
- * @cb: callback function, must list_del and destroy this list before
- * returning
- *
- * When inserted into the w1_master async_list, w1_process will execute
- * the callback. Embed this into the structure with the command details.
+ * struct w1_family_ops - operations for a family type
+ * @add_slave: called when a slave of this family is attached
+ * @remove_slave: called when a slave of this family is detached
+ * @groups: sysfs attribute groups added to each slave device
*/
-struct w1_async_cmd {
- struct list_head async_entry;
- void (*cb)(struct w1_master *dev, struct w1_async_cmd *async_cmd);
+struct w1_family_ops {
+ int (*add_slave)(struct w1_slave *sl);
+ void (*remove_slave)(struct w1_slave *sl);
+ const struct attribute_group **groups;
};
-int w1_create_master_attributes(struct w1_master *);
-void w1_destroy_master_attributes(struct w1_master *master);
-void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
-void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
-/* call w1_unref_slave to release the reference counts w1_search_slave added */
-struct w1_slave *w1_search_slave(struct w1_reg_num *id);
-/* decrements the reference on sl->master and sl, and cleans up if zero
- * returns the reference count after it has been decremented */
-int w1_unref_slave(struct w1_slave *sl);
-void w1_slave_found(struct w1_master *dev, u64 rn);
-void w1_search_process_cb(struct w1_master *dev, u8 search_type,
- w1_slave_found_callback cb);
-struct w1_slave *w1_slave_search_device(struct w1_master *dev,
- struct w1_reg_num *rn);
-struct w1_master *w1_search_master_id(u32 id);
-
-/* Disconnect and reconnect devices in the given family. Used for finding
- * unclaimed devices after a family has been registered or releasing devices
- * after a family has been unregistered. Set attach to 1 when a new family
- * has just been registered, to 0 when it has been unregistered.
+/**
+ * struct w1_family - reference counted family structure.
+ * @family_entry: family linked list
+ * @fid: 8 bit family identifier
+ * @fops: operations for this family
+ * @refcnt: reference counter
+ */
+struct w1_family {
+ struct list_head family_entry;
+ u8 fid;
+
+ struct w1_family_ops *fops;
+
+ atomic_t refcnt;
+};
+
+int w1_register_family(struct w1_family *family);
+void w1_unregister_family(struct w1_family *family);
+
+/**
+ * module_w1_family() - Helper macro for registering a 1-Wire family
+ * @__w1_family: w1_family struct
+ *
+ * Helper macro for 1-Wire families which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
*/
-void w1_reconnect_slaves(struct w1_family *f, int attach);
-int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn);
-/* 0 success, otherwise EBUSY */
-int w1_slave_detach(struct w1_slave *sl);
+#define module_w1_family(__w1_family) \
+ module_driver(__w1_family, w1_register_family, \
+ w1_unregister_family)
u8 w1_triplet(struct w1_master *dev, int bdir);
void w1_write_8(struct w1_master *, u8);
@@ -321,16 +315,6 @@ static inline struct w1_master* dev_to_w1_master(struct device *dev)
return container_of(dev, struct w1_master, dev);
}
-extern struct device_driver w1_master_driver;
-extern struct device w1_master_device;
-extern int w1_max_slave_count;
-extern int w1_max_slave_ttl;
-extern struct list_head w1_masters;
-extern struct mutex w1_mlock;
-
-extern int w1_process_callbacks(struct w1_master *dev);
-extern int w1_process(void *);
-
#endif /* __KERNEL__ */
-#endif /* __W1_H */
+#endif /* __LINUX_W1_H */
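
With the family definitions public, a slave driver shrinks to a family struct plus module_w1_family(). A hypothetical minimal driver against this header (0xf0 is a made-up family code):

#include <linux/module.h>
#include <linux/w1.h>

#define W1_FAMILY_FOO	0xf0

static int w1_foo_add_slave(struct w1_slave *sl)
{
	dev_info(&sl->dev, "foo slave attached\n");
	return 0;
}

static struct w1_family_ops w1_foo_fops = {
	.add_slave = w1_foo_add_slave,
};

static struct w1_family w1_foo_family = {
	.fid = W1_FAMILY_FOO,
	.fops = &w1_foo_fops,
};
module_w1_family(w1_foo_family);

MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_FOO));
MODULE_LICENSE("GPL");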
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
new file mode 100644
index 000000000000..697ee6678892
--- /dev/null
+++ b/include/trace/events/fsi.h
@@ -0,0 +1,127 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi
+
+#if !defined(_TRACE_FSI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_read,
+ TP_PROTO(const struct fsi_master *master, int link, int id,
+ uint32_t addr, size_t size),
+ TP_ARGS(master, link, id, addr, size),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, link)
+ __field(int, id)
+ __field(__u32, addr)
+ __field(size_t, size)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->link = link;
+ __entry->id = id;
+ __entry->addr = addr;
+ __entry->size = size;
+ ),
+ TP_printk("fsi%d:%02d:%02d %08x[%zd]",
+ __entry->master_idx,
+ __entry->link,
+ __entry->id,
+ __entry->addr,
+ __entry->size
+ )
+);
+
+TRACE_EVENT(fsi_master_write,
+ TP_PROTO(const struct fsi_master *master, int link, int id,
+ uint32_t addr, size_t size, const void *data),
+ TP_ARGS(master, link, id, addr, size, data),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, link)
+ __field(int, id)
+ __field(__u32, addr)
+ __field(size_t, size)
+ __field(__u32, data)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->link = link;
+ __entry->id = id;
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->data = 0;
+ memcpy(&__entry->data, data, size);
+ ),
+ TP_printk("fsi%d:%02d:%02d %08x[%zd] <= {%*ph}",
+ __entry->master_idx,
+ __entry->link,
+ __entry->id,
+ __entry->addr,
+ __entry->size,
+ (int)__entry->size, &__entry->data
+ )
+);
+
+TRACE_EVENT(fsi_master_rw_result,
+ TP_PROTO(const struct fsi_master *master, int link, int id,
+ uint32_t addr, size_t size,
+ bool write, const void *data, int ret),
+ TP_ARGS(master, link, id, addr, size, write, data, ret),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, link)
+ __field(int, id)
+ __field(__u32, addr)
+ __field(size_t, size)
+ __field(bool, write)
+ __field(__u32, data)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->link = link;
+ __entry->id = id;
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->write = write;
+ __entry->data = 0;
+ __entry->ret = ret;
+ if (__entry->write || !__entry->ret)
+ memcpy(&__entry->data, data, size);
+ ),
+ TP_printk("fsi%d:%02d:%02d %08x[%zd] %s {%*ph} ret %d",
+ __entry->master_idx,
+ __entry->link,
+ __entry->id,
+ __entry->addr,
+ __entry->size,
+ __entry->write ? "<=" : "=>",
+ (int)__entry->size, &__entry->data,
+ __entry->ret
+ )
+);
+
+TRACE_EVENT(fsi_master_break,
+ TP_PROTO(const struct fsi_master *master, int link),
+ TP_ARGS(master, link),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, link)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->link = link;
+ ),
+ TP_printk("fsi%d:%d",
+ __entry->master_idx,
+ __entry->link
+ )
+);
+
+
+#endif /* _TRACE_FSI_H */
+
+#include <trace/define_trace.h>
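
As with any TRACE_EVENT header, exactly one compilation unit instantiates these events by defining CREATE_TRACE_POINTS before the include; every other user simply includes the header and calls the generated trace_*() functions. A sketch of the emit side is below; the ->read master op and its signature are assumptions based on drivers/fsi/fsi-master.h, not part of this header.

/* Emit-side sketch: trace the request, perform it, then trace the result. */
#define CREATE_TRACE_POINTS
#include <trace/events/fsi.h>

static int fsi_master_read(struct fsi_master *master, int link,
			   uint8_t slave_id, uint32_t addr,
			   void *val, size_t size)
{
	int rc;

	trace_fsi_master_read(master, link, slave_id, addr, size);
	rc = master->read(master, link, slave_id, addr, val, size);	/* assumed op */
	trace_fsi_master_rw_result(master, link, slave_id, addr, size,
				   false, val, rc);
	return rc;
}
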
diff --git a/include/trace/events/fsi_master_gpio.h b/include/trace/events/fsi_master_gpio.h
new file mode 100644
index 000000000000..11b36c119048
--- /dev/null
+++ b/include/trace/events/fsi_master_gpio.h
@@ -0,0 +1,68 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_gpio
+
+#if !defined(_TRACE_FSI_MASTER_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_GPIO_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_gpio_in,
+ TP_PROTO(const struct fsi_master_gpio *master, int bits, uint64_t msg),
+ TP_ARGS(master, bits, msg),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, bits)
+ __field(uint64_t, msg)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->bits = bits;
+ __entry->msg = msg & ((1ull<<bits) - 1);
+ ),
+ TP_printk("fsi-gpio%d => %0*llx[%d]",
+ __entry->master_idx,
+ (__entry->bits + 3) / 4,
+ __entry->msg,
+ __entry->bits
+ )
+);
+
+TRACE_EVENT(fsi_master_gpio_out,
+ TP_PROTO(const struct fsi_master_gpio *master, int bits, uint64_t msg),
+ TP_ARGS(master, bits, msg),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, bits)
+ __field(uint64_t, msg)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->bits = bits;
+ __entry->msg = msg & ((1ull<<bits) - 1);
+ ),
+ TP_printk("fsi-gpio%d <= %0*llx[%d]",
+ __entry->master_idx,
+ (__entry->bits + 3) / 4,
+ __entry->msg,
+ __entry->bits
+ )
+);
+
+TRACE_EVENT(fsi_master_gpio_break,
+ TP_PROTO(const struct fsi_master_gpio *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ ),
+ TP_printk("fsi-gpio%d ----break---",
+ __entry->master_idx
+ )
+);
+
+#endif /* _TRACE_FSI_MASTER_GPIO_H */
+
+#include <trace/define_trace.h>
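
The GPIO master's bit-banging paths would emit these events around each message clocked in or out; since TP_fast_assign masks the message to the low @bits bits, callers can pass the raw shift-register contents. A sketch modelled on drivers/fsi/fsi-master-gpio.c follows; the fsi_gpio_msg struct and helper name mirror that driver but should be treated as assumptions here.

/* Trace each outgoing serial message before it hits the GPIO lines. */
static void serial_out(struct fsi_master_gpio *master,
		       const struct fsi_gpio_msg *cmd)
{
	trace_fsi_master_gpio_out(master, cmd->bits, cmd->msg);
	/* ... clock each of cmd->bits bits out on the data GPIO ... */
}
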
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 26a06e09a5bd..d70829033bb7 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -1,10 +1,13 @@
# KEEP ALPHABETICALLY SORTED
# CONFIG_DEVKMEM is not set
# CONFIG_DEVMEM is not set
+# CONFIG_FHANDLE is not set
# CONFIG_INET_LRO is not set
-# CONFIG_MODULES is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFS_FS is not set
# CONFIG_OABI_COMPAT is not set
# CONFIG_SYSVIPC is not set
+# CONFIG_USELIB is not set
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
@@ -13,6 +16,7 @@ CONFIG_ASHMEM=y
CONFIG_AUDIT=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CGROUPS=y
+CONFIG_CGROUP_BPF=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
@@ -23,6 +27,8 @@ CONFIG_EMBEDDED=y
CONFIG_FB=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
@@ -60,6 +66,9 @@ CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
CONFIG_NET=y
CONFIG_NETDEVICES=y
CONFIG_NETFILTER=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 28ee064b6744..946fb92418f7 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -6,13 +6,15 @@
# CONFIG_NF_CONNTRACK_SIP is not set
# CONFIG_PM_WAKELOCKS_GC is not set
# CONFIG_VT is not set
+CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BLK_DEV_DM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_COMPACTION=y
-CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_CPU_SW_DOMAIN_PAN=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
@@ -105,6 +107,7 @@ CONFIG_SCHEDSTATS=y
CONFIG_SMARTJOYPLUS_FF=y
CONFIG_SND=y
CONFIG_SOUND=y
+CONFIG_STRICT_KERNEL_RWX=y
CONFIG_SUSPEND_TIME=y
CONFIG_TABLET_USB_ACECAD=y
CONFIG_TABLET_USB_AIPTEK=y
diff --git a/lib/Kconfig b/lib/Kconfig
index 0c8b78a9ae2e..d2fd2623721e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -158,6 +158,14 @@ config CRC32_BIT
endchoice
+config CRC4
+ tristate "CRC4 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC4 functions, but a module built outside
+ the kernel tree does. Out-of-tree modules that use the library
+ CRC4 functions require M here.
+
config CRC7
tristate "CRC7 functions"
help
diff --git a/lib/Makefile b/lib/Makefile
index 07fbe6a75692..7fb6ab799b8e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
+obj-$(CONFIG_CRC4) += crc4.o
obj-$(CONFIG_CRC7) += crc7.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_CRC8) += crc8.o
diff --git a/lib/crc4.c b/lib/crc4.c
new file mode 100644
index 000000000000..cf6db46661be
--- /dev/null
+++ b/lib/crc4.c
@@ -0,0 +1,46 @@
+/*
+ * crc4.c - simple crc-4 calculations.
+ *
+ * This source code is licensed under the GNU General Public License, Version
+ * 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc4.h>
+#include <linux/module.h>
+
+static const uint8_t crc4_tab[] = {
+ 0x0, 0x7, 0xe, 0x9, 0xb, 0xc, 0x5, 0x2,
+ 0x1, 0x6, 0xf, 0x8, 0xa, 0xd, 0x4, 0x3,
+};
+
+/**
+ * crc4 - calculate the 4-bit crc of a value.
+ * @c: starting crc4
+ * @x: value to checksum
+ * @bits: number of bits in @x to checksum
+ *
+ * Returns the crc4 value of @x, using polynomial 0b10111.
+ *
+ * The low @bits bits of @x are checksummed most-significant-nibble
+ * first; any bits of @x above @bits are ignored.
+ */
+uint8_t crc4(uint8_t c, uint64_t x, int bits)
+{
+ int i;
+
+ /* mask off anything above the low @bits bits */
+ x &= (1ull << bits) - 1;
+
+ /* Align to 4-bits */
+ bits = (bits + 3) & ~0x3;
+
+ /* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+ for (i = bits - 4; i >= 0; i -= 4)
+ c = crc4_tab[c ^ ((x >> i) & 0xf)];
+
+ return c;
+}
+EXPORT_SYMBOL_GPL(crc4);
+
+MODULE_DESCRIPTION("CRC4 calculations");
+MODULE_LICENSE("GPL");
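
A consumer selects CRC4 from its Kconfig entry (the FSI GPIO master above is the in-tree user) and calls crc4() directly. A usage sketch, with an illustrative 60-bit message width:

#include <linux/crc4.h>

/* Append the CRC nibble to a 60-bit message, the kind of pattern the
 * FSI GPIO master uses when building command words (widths illustrative). */
static uint64_t msg_with_crc(uint64_t msg)
{
	uint8_t c = crc4(0, msg, 60);	/* CRC over the low 60 bits of msg */

	return (msg << 4) | c;		/* message followed by its CRC */
}
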
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index f1758fcbc37d..88b20e007c05 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -39,6 +39,7 @@
#include <fcntl.h>
#include <dirent.h>
#include <net/if.h>
+#include <limits.h>
#include <getopt.h>
/*
@@ -97,6 +98,8 @@ static struct utsname uts_buf;
#define KVP_SCRIPTS_PATH "/usr/libexec/hypervkvpd/"
#endif
+#define KVP_NET_DIR "/sys/class/net/"
+
#define MAX_FILE_NAME 100
#define ENTRIES_PER_BLOCK 50
@@ -596,26 +599,21 @@ static char *kvp_get_if_name(char *guid)
DIR *dir;
struct dirent *entry;
FILE *file;
- char *p, *q, *x;
+ char *p, *x;
char *if_name = NULL;
char buf[256];
- char *kvp_net_dir = "/sys/class/net/";
- char dev_id[256];
+ char dev_id[PATH_MAX];
- dir = opendir(kvp_net_dir);
+ dir = opendir(KVP_NET_DIR);
if (dir == NULL)
return NULL;
- snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir);
- q = dev_id + strlen(kvp_net_dir);
-
while ((entry = readdir(dir)) != NULL) {
/*
* Set the state for the next pass.
*/
- *q = '\0';
- strcat(dev_id, entry->d_name);
- strcat(dev_id, "/device/device_id");
+ snprintf(dev_id, sizeof(dev_id), "%s%s/device/device_id",
+ KVP_NET_DIR, entry->d_name);
file = fopen(dev_id, "r");
if (file == NULL)
@@ -653,12 +651,12 @@ static char *kvp_if_name_to_mac(char *if_name)
FILE *file;
char *p, *x;
char buf[256];
- char addr_file[256];
+ char addr_file[PATH_MAX];
unsigned int i;
char *mac_addr = NULL;
- snprintf(addr_file, sizeof(addr_file), "%s%s%s", "/sys/class/net/",
- if_name, "/address");
+ snprintf(addr_file, sizeof(addr_file), "%s%s%s", KVP_NET_DIR,
+ if_name, "/address");
file = fopen(addr_file, "r");
if (file == NULL)
@@ -688,28 +686,22 @@ static char *kvp_mac_to_if_name(char *mac)
DIR *dir;
struct dirent *entry;
FILE *file;
- char *p, *q, *x;
+ char *p, *x;
char *if_name = NULL;
char buf[256];
- char *kvp_net_dir = "/sys/class/net/";
- char dev_id[256];
+ char dev_id[PATH_MAX];
unsigned int i;
- dir = opendir(kvp_net_dir);
+ dir = opendir(KVP_NET_DIR);
if (dir == NULL)
return NULL;
- snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir);
- q = dev_id + strlen(kvp_net_dir);
-
while ((entry = readdir(dir)) != NULL) {
/*
* Set the state for the next pass.
*/
- *q = '\0';
-
- strcat(dev_id, entry->d_name);
- strcat(dev_id, "/address");
+ snprintf(dev_id, sizeof(dev_id), "%s%s/address", KVP_NET_DIR,
+ entry->d_name);
file = fopen(dev_id, "r");
if (file == NULL)
@@ -1218,9 +1210,9 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
{
int error = 0;
- char if_file[128];
+ char if_file[PATH_MAX];
FILE *file;
- char cmd[512];
+ char cmd[PATH_MAX];
char *mac_addr;
/*
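
The hv_kvp_daemon hunks above all converge on one pattern: build each sysfs path with a single snprintf() into a PATH_MAX buffer instead of strcat() into a fixed 256-byte one, so long interface names cannot overflow. A sketch of that pattern, with a truncation check added as optional hardening (hypothetical helper; the daemon itself does not check):

#include <limits.h>
#include <stdio.h>

/* buf must have room for PATH_MAX bytes; returns -1 on truncation. */
static int build_net_sysfs_path(char *buf, const char *ifname,
				const char *leaf)
{
	int n = snprintf(buf, PATH_MAX, "/sys/class/net/%s/%s", ifname, leaf);

	return (n < 0 || n >= PATH_MAX) ? -1 : 0;
}
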
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index e0829809c897..7ba54195934c 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -261,7 +261,9 @@ int main(int argc, char *argv[])
if (len != sizeof(struct hv_vss_msg)) {
syslog(LOG_ERR, "write failed; error: %d %s", errno,
strerror(errno));
- exit(EXIT_FAILURE);
+
+ if (op == VSS_OP_FREEZE)
+ vss_operate(VSS_OP_THAW);
}
}