author     Linus Torvalds <torvalds@linux-foundation.org>  2018-08-18 21:04:51 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-08-18 21:04:51 +0300
commit     d5acba26bfa097a618be425522b1ec4269d3edaf (patch)
tree       7abb08032d4b79b34eb1386aa007a811e1964839 /drivers
parent     2475c515d4031c494ff452508a8bf8c281ec6e56 (diff)
parent     128f38041035001276e964cda1cf951f218d965d (diff)
download   linux-d5acba26bfa097a618be425522b1ec4269d3edaf.tar.xz
Merge tag 'char-misc-4.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big set of char/misc drivers for 4.19-rc1

  There is a lot here, much more than normal, seems like everyone is
  writing new driver subsystems these days... Anyway, major things here
  are:

   - new FSI driver subsystem, yet-another-powerpc low-level hardware
     bus

   - gnss, finally an in-kernel GPS subsystem to try to tame all of the
     crazy out-of-tree drivers that have been floating around for years,
     combined with some really hacky userspace implementations. This is
     only for GNSS receivers, but you have to start somewhere, and this
     is great to see.

  Other than that, there are new slimbus drivers, new coresight drivers,
  new fpga drivers, and loads of DT bindings for all of these and
  existing drivers.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-4.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (255 commits)
  android: binder: Rate-limit debug and userspace triggered err msgs
  fsi: sbefifo: Bump max command length
  fsi: scom: Fix NULL dereference
  misc: mic: SCIF Fix scif_get_new_port() error handling
  misc: cxl: changed asterisk position
  genwqe: card_base: Use true and false for boolean values
  misc: eeprom: assignment outside the if statement
  uio: potential double frees if __uio_register_device() fails
  eeprom: idt_89hpesx: clean up an error pointer vs NULL inconsistency
  misc: ti-st: Fix memory leak in the error path of probe()
  android: binder: Show extra_buffers_size in trace
  firmware: vpd: Fix section enabled flag on vpd_section_destroy
  platform: goldfish: Retire pdev_bus
  goldfish: Use dedicated macros instead of manual bit shifting
  goldfish: Add missing includes to goldfish.h
  mux: adgs1408: new driver for Analog Devices ADGS1408/1409 mux
  dt-bindings: mux: add adi,adgs1408
  Drivers: hv: vmbus: Cleanup synic memory free path
  Drivers: hv: vmbus: Remove use of slow_virt_to_phys()
  Drivers: hv: vmbus: Reset the channel callback in vmbus_onoffer_rescind()
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/android/Kconfig | 2
-rw-r--r-- drivers/android/binder.c | 9
-rw-r--r-- drivers/android/binder_alloc.c | 43
-rw-r--r-- drivers/android/binder_trace.h | 7
-rw-r--r-- drivers/ata/pata_imx.c | 1
-rw-r--r-- drivers/ata/pata_samsung_cf.c | 1
-rw-r--r-- drivers/auxdisplay/hd44780.c | 1
-rw-r--r-- drivers/char/hpet.c | 2
-rw-r--r-- drivers/char/hw_random/atmel-rng.c | 1
-rw-r--r-- drivers/char/hw_random/exynos-trng.c | 1
-rw-r--r-- drivers/char/hw_random/imx-rngc.c | 1
-rw-r--r-- drivers/char/hw_random/powernv-rng.c | 1
-rw-r--r-- drivers/char/mem.c | 1
-rw-r--r-- drivers/char/pcmcia/cm4000_cs.c | 4
-rw-r--r-- drivers/char/virtio_console.c | 60
-rw-r--r-- drivers/crypto/mediatek/mtk-platform.c | 1
-rw-r--r-- drivers/crypto/qce/core.c | 1
-rw-r--r-- drivers/crypto/stm32/stm32_crc32.c | 1
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 1
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 1
-rw-r--r-- drivers/devfreq/tegra-devfreq.c | 1
-rw-r--r-- drivers/dma/ep93xx_dma.c | 1
-rw-r--r-- drivers/dma/s3c24xx-dma.c | 1
-rw-r--r-- drivers/extcon/extcon-intel-cht-wc.c | 1
-rw-r--r-- drivers/extcon/extcon-intel-int3496.c | 2
-rw-r--r-- drivers/extcon/extcon-max3355.c | 1
-rw-r--r-- drivers/extcon/extcon-qcom-spmi-misc.c | 1
-rw-r--r-- drivers/extcon/extcon-usbc-cros-ec.c | 22
-rw-r--r-- drivers/extcon/extcon.c | 3
-rw-r--r-- drivers/firmware/google/vpd.c | 5
-rw-r--r-- drivers/fpga/Kconfig | 68
-rw-r--r-- drivers/fpga/Makefile | 14
-rw-r--r-- drivers/fpga/dfl-afu-dma-region.c | 463
-rw-r--r-- drivers/fpga/dfl-afu-main.c | 636
-rw-r--r-- drivers/fpga/dfl-afu-region.c | 166
-rw-r--r-- drivers/fpga/dfl-afu.h | 100
-rw-r--r-- drivers/fpga/dfl-fme-br.c | 114
-rw-r--r-- drivers/fpga/dfl-fme-main.c | 279
-rw-r--r-- drivers/fpga/dfl-fme-mgr.c | 349
-rw-r--r-- drivers/fpga/dfl-fme-pr.c | 479
-rw-r--r-- drivers/fpga/dfl-fme-pr.h | 84
-rw-r--r-- drivers/fpga/dfl-fme-region.c | 89
-rw-r--r-- drivers/fpga/dfl-fme.h | 38
-rw-r--r-- drivers/fpga/dfl-pci.c | 243
-rw-r--r-- drivers/fpga/dfl.c | 1044
-rw-r--r-- drivers/fpga/dfl.h | 410
-rw-r--r-- drivers/fpga/fpga-mgr.c | 28
-rw-r--r-- drivers/fpga/fpga-region.c | 22
-rw-r--r-- drivers/fsi/Kconfig | 32
-rw-r--r-- drivers/fsi/Makefile | 2
-rw-r--r-- drivers/fsi/cf-fsi-fw.h | 157
-rw-r--r-- drivers/fsi/fsi-core.c | 590
-rw-r--r-- drivers/fsi/fsi-master-ast-cf.c | 1440
-rw-r--r-- drivers/fsi/fsi-master-gpio.c | 471
-rw-r--r-- drivers/fsi/fsi-master-hub.c | 5
-rw-r--r-- drivers/fsi/fsi-master.h | 37
-rw-r--r-- drivers/fsi/fsi-sbefifo.c | 1066
-rw-r--r-- drivers/fsi/fsi-scom.c | 558
-rw-r--r-- drivers/gnss/Kconfig | 43
-rw-r--r-- drivers/gnss/Makefile | 16
-rw-r--r-- drivers/gnss/core.c | 420
-rw-r--r-- drivers/gnss/serial.c | 275
-rw-r--r-- drivers/gnss/serial.h | 47
-rw-r--r-- drivers/gnss/sirf.c | 408
-rw-r--r-- drivers/gnss/ubx.c | 153
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_cec.c | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun6i_drc.c | 1
-rw-r--r-- drivers/hid/hid-hyperv.c | 3
-rw-r--r-- drivers/hsi/controllers/omap_ssi_port.c | 1
-rw-r--r-- drivers/hv/channel.c | 67
-rw-r--r-- drivers/hv/channel_mgmt.c | 10
-rw-r--r-- drivers/hv/hv.c | 44
-rw-r--r-- drivers/hv/hv_balloon.c | 3
-rw-r--r-- drivers/hv/hv_util.c | 3
-rw-r--r-- drivers/hv/ring_buffer.c | 65
-rw-r--r-- drivers/hv/vmbus_drv.c | 120
-rw-r--r-- drivers/hwmon/max197.c | 1
-rw-r--r-- drivers/hwmon/mc13783-adc.c | 1
-rw-r--r-- drivers/hwtracing/coresight/Kconfig | 11
-rw-r--r-- drivers/hwtracing/coresight/Makefile | 1
-rw-r--r-- drivers/hwtracing/coresight/coresight-catu.c | 577
-rw-r--r-- drivers/hwtracing/coresight/coresight-catu.h | 119
-rw-r--r-- drivers/hwtracing/coresight/coresight-etb10.c | 12
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm.h | 3
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | 43
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm3x.c | 4
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | 47
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x.c | 31
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x.h | 3
-rw-r--r-- drivers/hwtracing/coresight/coresight-priv.h | 10
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc-etf.c | 45
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc-etr.c | 1074
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc.c | 83
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc.h | 113
-rw-r--r-- drivers/hwtracing/coresight/coresight-tpiu.c | 7
-rw-r--r-- drivers/hwtracing/coresight/coresight.c | 53
-rw-r--r-- drivers/input/serio/hyperv-keyboard.c | 3
-rw-r--r-- drivers/ipack/carriers/tpci200.c | 7
-rw-r--r-- drivers/media/platform/coda/imx-vdoa.c | 1
-rw-r--r-- drivers/media/platform/rcar-fcp.c | 1
-rw-r--r-- drivers/media/platform/vimc/vimc-capture.c | 1
-rw-r--r-- drivers/media/platform/vimc/vimc-debayer.c | 1
-rw-r--r-- drivers/media/platform/vimc/vimc-scaler.c | 1
-rw-r--r-- drivers/media/platform/vimc/vimc-sensor.c | 1
-rw-r--r-- drivers/memory/tegra/tegra186.c | 1
-rw-r--r-- drivers/mfd/atmel-hlcdc.c | 1
-rw-r--r-- drivers/mfd/cros_ec_dev.c | 1
-rw-r--r-- drivers/misc/aspeed-lpc-snoop.c | 84
-rw-r--r-- drivers/misc/cardreader/rtsx_pcr.c | 12
-rw-r--r-- drivers/misc/eeprom/at25.c | 4
-rw-r--r-- drivers/misc/eeprom/idt_89hpesx.c | 6
-rw-r--r-- drivers/misc/eeprom/max6875.c | 3
-rw-r--r-- drivers/misc/genwqe/card_base.h | 2
-rw-r--r-- drivers/misc/genwqe/card_debugfs.c | 4
-rw-r--r-- drivers/misc/genwqe/card_dev.c | 5
-rw-r--r-- drivers/misc/ibmvmc.c | 2
-rw-r--r-- drivers/misc/mei/bus-fixup.c | 92
-rw-r--r-- drivers/misc/mei/bus.c | 36
-rw-r--r-- drivers/misc/mei/client.c | 128
-rw-r--r-- drivers/misc/mei/client.h | 2
-rw-r--r-- drivers/misc/mei/debugfs.c | 2
-rw-r--r-- drivers/misc/mei/hbm.c | 37
-rw-r--r-- drivers/misc/mei/hw-me.c | 77
-rw-r--r-- drivers/misc/mei/hw-me.h | 6
-rw-r--r-- drivers/misc/mei/hw-txe.c | 66
-rw-r--r-- drivers/misc/mei/hw.h | 73
-rw-r--r-- drivers/misc/mei/interrupt.c | 17
-rw-r--r-- drivers/misc/mei/main.c | 41
-rw-r--r-- drivers/misc/mei/mei_dev.h | 77
-rw-r--r-- drivers/misc/mic/cosm/cosm_main.h | 5
-rw-r--r-- drivers/misc/mic/cosm/cosm_scif_server.c | 6
-rw-r--r-- drivers/misc/mic/cosm_client/cosm_scif_client.c | 6
-rw-r--r-- drivers/misc/mic/scif/scif_api.c | 21
-rw-r--r-- drivers/misc/sgi-xp/xpc_channel.c | 3
-rw-r--r-- drivers/misc/sgi-xp/xpc_partition.c | 5
-rw-r--r-- drivers/misc/sram.c | 18
-rw-r--r-- drivers/misc/ti-st/Kconfig | 3
-rw-r--r-- drivers/misc/ti-st/st_kim.c | 7
-rw-r--r-- drivers/misc/tsl2550.c | 2
-rw-r--r-- drivers/misc/vexpress-syscfg.c | 10
-rw-r--r-- drivers/misc/vmw_balloon.c | 181
-rw-r--r-- drivers/misc/vmw_vmci/vmci_queue_pair.c | 9
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c | 1
-rw-r--r-- drivers/mux/Kconfig | 10
-rw-r--r-- drivers/mux/Makefile | 2
-rw-r--r-- drivers/mux/adgs1408.c | 131
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 1
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/ahb.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl12xx/main.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl18xx/main.c | 1
-rw-r--r-- drivers/nvmem/Kconfig | 11
-rw-r--r-- drivers/nvmem/Makefile | 3
-rw-r--r-- drivers/nvmem/core.c | 2
-rw-r--r-- drivers/nvmem/imx-ocotp.c | 7
-rw-r--r-- drivers/nvmem/lpc18xx_eeprom.c | 1
-rw-r--r-- drivers/nvmem/mtk-efuse.c | 1
-rw-r--r-- drivers/nvmem/qfprom.c | 1
-rw-r--r-- drivers/nvmem/rave-sp-eeprom.c | 6
-rw-r--r-- drivers/nvmem/sc27xx-efuse.c | 264
-rw-r--r-- drivers/nvmem/uniphier-efuse.c | 1
-rw-r--r-- drivers/parport/ieee1284.c | 3
-rw-r--r-- drivers/parport/parport_sunbpp.c | 8
-rw-r--r-- drivers/perf/arm-ccn.c | 1
-rw-r--r-- drivers/pinctrl/intel/pinctrl-merrifield.c | 1
-rw-r--r-- drivers/pinctrl/pinctrl-u300.c | 1
-rw-r--r-- drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c | 1
-rw-r--r-- drivers/platform/goldfish/Kconfig | 5
-rw-r--r-- drivers/platform/goldfish/Makefile | 1
-rw-r--r-- drivers/platform/goldfish/goldfish_pipe.c | 3
-rw-r--r-- drivers/platform/goldfish/pdev_bus.c | 232
-rw-r--r-- drivers/platform/x86/intel_bxtwc_tmu.c | 1
-rw-r--r-- drivers/power/avs/smartreflex.c | 1
-rw-r--r-- drivers/power/reset/ltc2952-poweroff.c | 1
-rw-r--r-- drivers/power/supply/max8998_charger.c | 1
-rw-r--r-- drivers/power/supply/olpc_battery.c | 1
-rw-r--r-- drivers/ptp/ptp_dte.c | 1
-rw-r--r-- drivers/regulator/tps65912-regulator.c | 1
-rw-r--r-- drivers/reset/reset-ath79.c | 1
-rw-r--r-- drivers/reset/reset-axs10x.c | 1
-rw-r--r-- drivers/reset/reset-imx7.c | 1
-rw-r--r-- drivers/rtc/rtc-coh901331.c | 1
-rw-r--r-- drivers/rtc/rtc-cpcap.c | 1
-rw-r--r-- drivers/rtc/rtc-ftrtc010.c | 1
-rw-r--r-- drivers/rtc/rtc-mc13xxx.c | 1
-rw-r--r-- drivers/rtc/rtc-mxc_v2.c | 1
-rw-r--r-- drivers/rtc/rtc-r7301.c | 1
-rw-r--r-- drivers/rtc/rtc-sh.c | 1
-rw-r--r-- drivers/rtc/rtc-tegra.c | 1
-rw-r--r-- drivers/scsi/storvsc_drv.c | 3
-rw-r--r-- drivers/siox/siox-bus-gpio.c | 1
-rw-r--r-- drivers/siox/siox-core.c | 30
-rw-r--r-- drivers/slimbus/Kconfig | 12
-rw-r--r-- drivers/slimbus/Makefile | 5
-rw-r--r-- drivers/slimbus/core.c | 41
-rw-r--r-- drivers/slimbus/messaging.c | 79
-rw-r--r-- drivers/slimbus/qcom-ngd-ctrl.c | 1526
-rw-r--r-- drivers/slimbus/slimbus.h | 198
-rw-r--r-- drivers/slimbus/stream.c | 477
-rw-r--r-- drivers/thunderbolt/domain.c | 55
-rw-r--r-- drivers/thunderbolt/icm.c | 174
-rw-r--r-- drivers/thunderbolt/nhi.c | 46
-rw-r--r-- drivers/thunderbolt/switch.c | 65
-rw-r--r-- drivers/thunderbolt/tb.h | 10
-rw-r--r-- drivers/thunderbolt/tb_msgs.h | 4
-rw-r--r-- drivers/thunderbolt/tb_regs.h | 2
-rw-r--r-- drivers/thunderbolt/xdomain.c | 18
-rw-r--r-- drivers/tty/goldfish.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_em.c | 1
-rw-r--r-- drivers/tty/serial/sccnxp.c | 1
-rw-r--r-- drivers/uio/uio.c | 24
-rw-r--r-- drivers/uio/uio_cif.c | 4
-rw-r--r-- drivers/uio/uio_fsl_elbc_gpcm.c | 1
-rw-r--r-- drivers/uio/uio_hv_generic.c | 4
-rw-r--r-- drivers/uio/uio_netx.c | 3
-rw-r--r-- drivers/uio/uio_pci_generic.c | 3
-rw-r--r-- drivers/uio/uio_pruss.c | 69
-rw-r--r-- drivers/uio/uio_sercos3.c | 1
-rw-r--r-- drivers/usb/gadget/udc/fsl_mxc_udc.c | 1
-rw-r--r-- drivers/video/fbdev/hyperv_fb.c | 6
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c | 1
-rw-r--r-- drivers/vme/bridges/vme_ca91cx42.c | 2
-rw-r--r-- drivers/w1/masters/ds2482.c | 2
-rw-r--r-- drivers/w1/masters/ds2490.c | 16
-rw-r--r-- drivers/w1/masters/mxc_w1.c | 1
-rw-r--r-- drivers/watchdog/coh901327_wdt.c | 1
-rw-r--r-- drivers/watchdog/davinci_wdt.c | 1
-rw-r--r-- drivers/watchdog/imgpdc_wdt.c | 1
-rw-r--r-- drivers/watchdog/max63xx_wdt.c | 1
-rw-r--r-- drivers/watchdog/max77620_wdt.c | 1
-rw-r--r-- drivers/watchdog/moxart_wdt.c | 1
-rw-r--r-- drivers/watchdog/omap_wdt.c | 1
-rw-r--r-- drivers/watchdog/tangox_wdt.c | 1
245 files changed, 16333 insertions, 1448 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 95b9ccc08165..ab4d43923c4d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -9,6 +9,8 @@ source "drivers/bus/Kconfig"
source "drivers/connector/Kconfig"
+source "drivers/gnss/Kconfig"
+
source "drivers/mtd/Kconfig"
source "drivers/of/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index a6abd7a856c6..578f469f72fb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -185,3 +185,4 @@ obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
+obj-$(CONFIG_GNSS) += gnss/
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index ee4880bfdcdc..432e9ad77070 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU && !M68K
+ depends on MMU
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 95283f3bb51c..d58763b6b009 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -51,7 +51,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
@@ -71,8 +70,12 @@
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
#include <uapi/linux/android/binder.h>
+
+#include <asm/cacheflush.h>
+
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -161,13 +164,13 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
#define binder_debug(mask, x...) \
do { \
if (binder_debug_mask & mask) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
} while (0)
#define binder_user_error(x...) \
do { \
if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
if (binder_stop_on_user_error) \
binder_stop_on_user_error = 2; \
} while (0)
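
The binder change above gates each message on a debug mask and then rate-limits it with pr_info_ratelimited(), so a buggy or hostile userspace can no longer flood the kernel log through the debug and user-error paths. A minimal sketch of the same pattern in a standalone module (the macro and mask names here are illustrative, not binder's):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

enum {
	DRV_DEBUG_IO = 1U << 0,		/* hypothetical mask bit */
};

static uint32_t drv_debug_mask = DRV_DEBUG_IO;
module_param_named(debug_mask, drv_debug_mask, uint, 0644);

/* pr_info_ratelimited() silently drops messages beyond the default
 * burst, so a misbehaving caller cannot flood the kernel log. */
#define drv_debug(mask, x...)				\
	do {						\
		if (drv_debug_mask & mask)		\
			pr_info_ratelimited(x);		\
	} while (0)

static int __init drv_example_init(void)
{
	drv_debug(DRV_DEBUG_IO, "loaded, debug_mask=%u\n", drv_debug_mask);
	return 0;
}
module_init(drv_example_init);

static void __exit drv_example_exit(void)
{
}
module_exit(drv_example_exit);

MODULE_LICENSE("GPL");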
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2628806c64a2..3f3b7b253445 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -17,7 +17,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
@@ -28,6 +27,8 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
+#include <linux/ratelimit.h>
+#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -36,11 +37,12 @@ struct list_lru binder_alloc_lru;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
-static uint32_t binder_alloc_debug_mask;
+static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
module_param_named(debug_mask, binder_alloc_debug_mask,
uint, 0644);
@@ -48,7 +50,7 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
#define binder_alloc_debug(mask, x...) \
do { \
if (binder_alloc_debug_mask & mask) \
- pr_info(x); \
+ pr_info_ratelimited(x); \
} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
@@ -152,8 +154,10 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
* free the buffer twice
*/
if (buffer->free_in_progress) {
- pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
- alloc->pid, current->pid, (u64)user_ptr);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid,
+ (u64)user_ptr);
return NULL;
}
buffer->free_in_progress = 1;
@@ -224,8 +228,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
if (!vma && need_mm) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- alloc->pid);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
goto err_no_vma;
}
@@ -344,8 +349,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
int ret;
if (alloc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- alloc->pid);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
return ERR_PTR(-ESRCH);
}
@@ -417,11 +423,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
if (buffer_size > largest_free_size)
largest_free_size = buffer_size;
}
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- alloc->pid, size);
- pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
- total_alloc_size, allocated_buffers, largest_alloc_size,
- total_free_size, free_buffers, largest_free_size);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers,
+ largest_alloc_size, total_free_size,
+ free_buffers, largest_free_size);
return ERR_PTR(-ENOSPC);
}
if (n == NULL) {
@@ -731,8 +740,10 @@ err_alloc_pages_failed:
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
- pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
- alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end,
+ failure_string, ret);
return ret;
}
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 76e3b9c8a8a2..588eb3ec3507 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -248,14 +248,17 @@ DECLARE_EVENT_CLASS(binder_buffer_class,
__field(int, debug_id)
__field(size_t, data_size)
__field(size_t, offsets_size)
+ __field(size_t, extra_buffers_size)
),
TP_fast_assign(
__entry->debug_id = buf->debug_id;
__entry->data_size = buf->data_size;
__entry->offsets_size = buf->offsets_size;
+ __entry->extra_buffers_size = buf->extra_buffers_size;
),
- TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
- __entry->debug_id, __entry->data_size, __entry->offsets_size)
+ TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd",
+ __entry->debug_id, __entry->data_size, __entry->offsets_size,
+ __entry->extra_buffers_size)
);
DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index d4caa23f5a88..6f0534047c6d 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/libata.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#define DRV_NAME "pata_imx"
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index bb96dc35950d..f5bd44b8bd63 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/libata.h>
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 78d8f1986fec..f1a42f0f1ded 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index be426eb2a353..4a22b4b41aef 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -579,7 +579,6 @@ hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
struct hpet_info *info)
{
struct hpet_timer __iomem *timer;
- struct hpet __iomem *hpet;
struct hpets *hpetp;
int err;
unsigned long v;
@@ -591,7 +590,6 @@ hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
case HPET_DPI:
case HPET_IRQFREQ:
timer = devp->hd_timer;
- hpet = devp->hd_hpet;
hpetp = devp->hd_hpets;
break;
case HPET_IE_ON:
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 661c82cde0f2..433426242b87 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 1947aed7c044..94235761955c 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -19,6 +19,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 250123bc4905..14730be54edf 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 263a5bb8e605..791182aa8e04 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/random.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index df66a9dd0aae..7b4e4de778e4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -766,6 +766,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
switch (orig) {
case SEEK_CUR:
offset += file->f_pos;
+ /* fall through */
case SEEK_SET:
/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
if ((unsigned long long)offset >= -MAX_ERRNO) {
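
The /* fall through */ comment added in memory_lseek() above marks the SEEK_CUR-to-SEEK_SET fall-through as intentional, which is what GCC's -Wimplicit-fallthrough looks for. As a hedged aside, later kernels (v5.4 onward) spell this with the fallthrough pseudo-keyword from <linux/compiler_attributes.h> instead of a comment; a sketch of the same lseek idiom with illustrative names:

#include <linux/compiler_attributes.h>
#include <linux/fs.h>

static loff_t example_lseek(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;	/* convert to an absolute offset */
		fallthrough;		/* intentional: handle like SEEK_SET */
	case SEEK_SET:
		file->f_pos = offset;
		return offset;
	default:
		return -EINVAL;
	}
}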
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 370e0a64ead1..a219964cb770 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1748,8 +1748,6 @@ static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data)
static int cm4000_config(struct pcmcia_device * link, int devno)
{
- struct cm4000_dev *dev;
-
link->config_flags |= CONF_AUTO_SET_IO;
/* read the config-tuples */
@@ -1759,8 +1757,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
if (pcmcia_enable_device(link))
goto cs_release;
- dev = link->priv;
-
return 0;
cs_release:
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 17084cfcf53e..5b5b5d72eab7 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1309,51 +1309,35 @@ static const struct attribute_group port_attribute_group = {
.attrs = port_sysfs_entries,
};
-static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
- size_t count, loff_t *offp)
+static int debugfs_show(struct seq_file *s, void *data)
{
- struct port *port;
- char *buf;
- ssize_t ret, out_offset, out_count;
+ struct port *port = s->private;
+
+ seq_printf(s, "name: %s\n", port->name ? port->name : "");
+ seq_printf(s, "guest_connected: %d\n", port->guest_connected);
+ seq_printf(s, "host_connected: %d\n", port->host_connected);
+ seq_printf(s, "outvq_full: %d\n", port->outvq_full);
+ seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
+ seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
+ seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
+ seq_printf(s, "is_console: %s\n",
+ is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
- out_count = 1024;
- buf = kmalloc(out_count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ return 0;
+}
- port = filp->private_data;
- out_offset = 0;
- out_offset += snprintf(buf + out_offset, out_count,
- "name: %s\n", port->name ? port->name : "");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "guest_connected: %d\n", port->guest_connected);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "host_connected: %d\n", port->host_connected);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "outvq_full: %d\n", port->outvq_full);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_sent: %lu\n", port->stats.bytes_sent);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_received: %lu\n",
- port->stats.bytes_received);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "bytes_discarded: %lu\n",
- port->stats.bytes_discarded);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "is_console: %s\n",
- is_console_port(port) ? "yes" : "no");
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "console_vtermno: %u\n", port->cons.vtermno);
-
- ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
- kfree(buf);
- return ret;
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
}
static const struct file_operations port_debugfs_ops = {
.owner = THIS_MODULE,
- .open = simple_open,
- .read = debugfs_read,
+ .open = debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void set_console_size(struct port *port, u16 rows, u16 cols)
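
The virtio_console conversion above replaces a hand-rolled kmalloc()/snprintf()/simple_read_from_buffer() sequence with the seq_file single_open() pattern, which handles buffering, offsets and short reads itself. For what it's worth, kernels from around v4.16 also provide DEFINE_SHOW_ATTRIBUTE() in <linux/seq_file.h> to generate the open handler and file_operations boilerplate written out explicitly in the diff; a minimal sketch with illustrative names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct foo_stats {			/* illustrative stand-in for struct port */
	int guest_connected;
	unsigned long bytes_sent;
};

static int foo_stats_show(struct seq_file *s, void *unused)
{
	struct foo_stats *st = s->private;

	seq_printf(s, "guest_connected: %d\n", st->guest_connected);
	seq_printf(s, "bytes_sent: %lu\n", st->bytes_sent);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_stats);	/* emits foo_stats_open() and foo_stats_fops */

/* usage:
 *	debugfs_create_file("stats", 0444, parent_dir, st, &foo_stats_fops);
 */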
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index b182e941b0cd..ee0404e27a0f 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "mtk-platform.h"
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 718b32a3112e..1c3b36b75467 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 5f3242a246fc..29d2095d9dfd 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/crc32poly.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index cb31b59c9d53..d2663a4e1f5e 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -20,6 +20,7 @@
#include <linux/irqreturn.h>
#include <linux/klist.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/semaphore.h>
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index daf4fed0df8c..633321a8dd03 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -21,6 +21,7 @@
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index ae712159246f..c59d2eee5d30 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index ec240592f5c8..a15592383d4e 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 7056fe7513b4..64744eb88720 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index b7e9ea377d70..5e1dd2772278 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index acaccb128fc4..fd24debe58a3 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -20,7 +20,7 @@
#include <linux/acpi.h>
#include <linux/extcon-provider.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index 0aa410836f4e..1335a476bfec 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -14,6 +14,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
struct max3355_data {
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 660bbf163bf5..72bc0f2478e2 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 6721ab01fe7d..43c0a936ab82 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -1,18 +1,8 @@
-/**
- * drivers/extcon/extcon-usbc-cros-ec - ChromeOS Embedded Controller extcon
- *
- * Copyright (C) 2017 Google, Inc
- * Author: Benson Leung <bleung@chromium.org>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// ChromeOS Embedded Controller extcon
+//
+// Copyright (C) 2017 Google, Inc.
+// Author: Benson Leung <bleung@chromium.org>
#include <linux/extcon-provider.h>
#include <linux/kernel.h>
@@ -548,4 +538,4 @@ module_platform_driver(extcon_cros_ec_driver);
MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index af83ad58819c..b9d27c8fe57e 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
return index;
spin_lock_irqsave(&edev->lock, flags);
-
state = !!(edev->state & BIT(index));
+ spin_unlock_irqrestore(&edev->lock, flags);
/*
* Call functions in a raw notifier chain for the specific one
@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
*/
raw_notifier_call_chain(&edev->nh_all, state, edev);
+ spin_lock_irqsave(&edev->lock, flags);
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (!prop_buf) {
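
The extcon_sync() fix above follows the standard pattern of snapshotting state under a spinlock, dropping the lock across the notifier call (whose callbacks may take the same lock or otherwise must not run under it), then reacquiring it. A minimal sketch of that pattern with hypothetical names, not extcon's:

#include <linux/bits.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>

struct foo_dev {
	spinlock_t lock;
	unsigned long state;
	struct raw_notifier_head nh;
};

static void foo_sync(struct foo_dev *fdev, unsigned int index)
{
	unsigned long flags;
	unsigned long state;

	spin_lock_irqsave(&fdev->lock, flags);
	state = !!(fdev->state & BIT(index));	/* snapshot under the lock */
	spin_unlock_irqrestore(&fdev->lock, flags);

	/* Callbacks run without fdev->lock held; they may legitimately
	 * take fdev->lock themselves, which would deadlock otherwise. */
	raw_notifier_call_chain(&fdev->nh, state, fdev);

	spin_lock_irqsave(&fdev->lock, flags);
	/* ... continue with whatever still needs the lock ... */
	spin_unlock_irqrestore(&fdev->lock, flags);
}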
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index e9db895916c3..1aa67bb5d8c0 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
memunmap(sec->baseaddr);
+ sec->enabled = false;
}
return 0;
@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
ret = vpd_section_init("rw", &rw_vpd,
physaddr + sizeof(struct vpd_cbmem) +
header.ro_size, header.rw_size);
- if (ret)
+ if (ret) {
+ vpd_section_destroy(&ro_vpd);
return ret;
+ }
}
return 0;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index ee9c5420c47f..1ebcef4bab5b 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -130,4 +130,72 @@ config OF_FPGA_REGION
Support for loading FPGA images by applying a Device Tree
overlay.
+config FPGA_DFL
+ tristate "FPGA Device Feature List (DFL) support"
+ select FPGA_BRIDGE
+ select FPGA_REGION
+ help
+ Device Feature List (DFL) defines a feature list structure that
+ creates a linked list of feature headers within the MMIO space
+ to provide an extensible way of adding features for FPGA.
+ The driver can walk through the feature headers to enumerate feature
+ devices (e.g. FPGA Management Engine, Port and Accelerator
+ Function Unit) and their private features for target FPGA devices.
+
+ Select this option to enable common support for Field-Programmable
+ Gate Array (FPGA) solutions which implement Device Feature List.
+ It provides enumeration APIs and feature device infrastructure.
+
+config FPGA_DFL_FME
+ tristate "FPGA DFL FME Driver"
+ depends on FPGA_DFL
+ help
+ The FPGA Management Engine (FME) is a feature device implemented
+ under Device Feature List (DFL) framework. Select this option to
+ enable the platform device driver for FME which implements all
+ FPGA platform level management features. There shall be one FME
+ per DFL based FPGA device.
+
+config FPGA_DFL_FME_MGR
+ tristate "FPGA DFL FME Manager Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Manager driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_BRIDGE
+ tristate "FPGA DFL FME Bridge Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Bridge driver for FPGA Management Engine.
+
+config FPGA_DFL_FME_REGION
+ tristate "FPGA DFL FME Region Driver"
+ depends on FPGA_DFL_FME && HAS_IOMEM
+ help
+ Say Y to enable FPGA Region driver for FPGA Management Engine.
+
+config FPGA_DFL_AFU
+ tristate "FPGA DFL AFU Driver"
+ depends on FPGA_DFL
+ help
+ This is the driver for FPGA Accelerated Function Unit (AFU) which
+ implements AFU and Port management features. A User AFU connects
+ to the FPGA infrastructure via a Port. There may be more than one
+ Port/AFU per DFL based FPGA device.
+
+config FPGA_DFL_PCI
+ tristate "FPGA DFL PCIe Device Driver"
+ depends on PCI && FPGA_DFL
+ help
+ Select this option to enable PCIe driver for PCIe-based
+ Field-Programmable Gate Array (FPGA) solutions which implement
+ the Device Feature List (DFL). This driver provides interfaces
+ for userspace applications to configure, enumerate, open and access
+ FPGA accelerators on the FPGA DFL devices, enables system level
+ management functions such as FPGA partial reconfiguration, power
+ management and virtualization with DFL framework and DFL feature
+ device drivers.
+
+ To compile this as a module, choose M here.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index f9803dad6919..7a2d73ba7122 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -28,3 +28,17 @@ obj-$(CONFIG_XILINX_PR_DECOUPLER) += xilinx-pr-decoupler.o
# High Level Interfaces
obj-$(CONFIG_FPGA_REGION) += fpga-region.o
obj-$(CONFIG_OF_FPGA_REGION) += of-fpga-region.o
+
+# FPGA Device Feature List Support
+obj-$(CONFIG_FPGA_DFL) += dfl.o
+obj-$(CONFIG_FPGA_DFL_FME) += dfl-fme.o
+obj-$(CONFIG_FPGA_DFL_FME_MGR) += dfl-fme-mgr.o
+obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
+obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
+obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
+
+dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
+dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
+
+# Drivers for FPGAs which implement DFL
+obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
new file mode 100644
index 000000000000..0e81d33af856
--- /dev/null
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include "dfl-afu.h"
+
+static void put_all_pages(struct page **pages, int npages)
+{
+ int i;
+
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ put_page(pages[i]);
+}
+
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ afu->dma_regions = RB_ROOT;
+}
+
+/**
+ * afu_dma_adjust_locked_vm - adjust locked memory
+ * @dev: port device
+ * @npages: number of pages
+ * @incr: increase or decrease locked memory
+ *
+ * Increase or decrease the locked memory size with npages input.
+ *
+ * Return 0 on success.
+ * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK.
+ */
+static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
+{
+ unsigned long locked, lock_limit;
+ int ret = 0;
+
+ /* the task is exiting. */
+ if (!current->mm)
+ return 0;
+
+ down_write(&current->mm->mmap_sem);
+
+ if (incr) {
+ locked = current->mm->locked_vm + npages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ ret = -ENOMEM;
+ else
+ current->mm->locked_vm += npages;
+ } else {
+ if (WARN_ON_ONCE(npages > current->mm->locked_vm))
+ npages = current->mm->locked_vm;
+ current->mm->locked_vm -= npages;
+ }
+
+ dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
+ incr ? '+' : '-', npages << PAGE_SHIFT,
+ current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
+ ret ? "- execeeded" : "");
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+/**
+ * afu_dma_pin_pages - pin pages of given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be pinned
+ *
+ * Pin all the pages of given dfl_afu_dma_region.
+ * Return 0 for success or negative error code.
+ */
+static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+ int ret, pinned;
+
+ ret = afu_dma_adjust_locked_vm(dev, npages, true);
+ if (ret)
+ return ret;
+
+ region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!region->pages) {
+ ret = -ENOMEM;
+ goto unlock_vm;
+ }
+
+ pinned = get_user_pages_fast(region->user_addr, npages, 1,
+ region->pages);
+ if (pinned < 0) {
+ ret = pinned;
+ goto put_pages;
+ } else if (pinned != npages) {
+ ret = -EFAULT;
+ goto free_pages;
+ }
+
+ dev_dbg(dev, "%d pages pinned\n", pinned);
+
+ return 0;
+
+put_pages:
+ put_all_pages(region->pages, pinned);
+free_pages:
+ kfree(region->pages);
+unlock_vm:
+ afu_dma_adjust_locked_vm(dev, npages, false);
+ return ret;
+}
+
+/**
+ * afu_dma_unpin_pages - unpin pages of given dma memory region
+ * @pdata: feature device platform data
+ * @region: dma memory region to be unpinned
+ *
+ * Unpin all the pages of given dfl_afu_dma_region.
+ * Return 0 for success or negative error code.
+ */
+static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ long npages = region->length >> PAGE_SHIFT;
+ struct device *dev = &pdata->dev->dev;
+
+ put_all_pages(region->pages, npages);
+ kfree(region->pages);
+ afu_dma_adjust_locked_vm(dev, npages, false);
+
+ dev_dbg(dev, "%ld pages unpinned\n", npages);
+}
+
+/**
+ * afu_dma_check_continuous_pages - check if pages are continuous
+ * @region: dma memory region
+ *
+ * Return true if pages of given dma memory region have continuous physical
+ * address, otherwise return false.
+ */
+static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
+{
+ int npages = region->length >> PAGE_SHIFT;
+ int i;
+
+ for (i = 0; i < npages - 1; i++)
+ if (page_to_pfn(region->pages[i]) + 1 !=
+ page_to_pfn(region->pages[i + 1]))
+ return false;
+
+ return true;
+}
+
+/**
+ * dma_region_check_iova - check if memory area is fully contained in the region
+ * @region: dma memory region
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * Compare the dma memory area defined by @iova and @size with given dma region.
+ * Return true if memory area is fully contained in the region, otherwise false.
+ */
+static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
+ u64 iova, u64 size)
+{
+ if (!size && region->iova != iova)
+ return false;
+
+ return (region->iova <= iova) &&
+ (region->length + region->iova >= iova + size);
+}
+
+/**
+ * afu_dma_region_add - add given dma region to rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be added
+ *
+ * Return 0 for success, -EEXIST if dma region has already been added.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node **new, *parent = NULL;
+
+ dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ new = &afu->dma_regions.rb_node;
+
+ while (*new) {
+ struct dfl_afu_dma_region *this;
+
+ this = container_of(*new, struct dfl_afu_dma_region, node);
+
+ parent = *new;
+
+ if (dma_region_check_iova(this, region->iova, region->length))
+ return -EEXIST;
+
+ if (region->iova < this->iova)
+ new = &((*new)->rb_left);
+ else if (region->iova > this->iova)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&region->node, parent, new);
+ rb_insert_color(&region->node, &afu->dma_regions);
+
+ return 0;
+}
+
+/**
+ * afu_dma_region_remove - remove given dma region from rbtree
+ * @pdata: feature device platform data
+ * @region: dma region to be removed
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+ struct dfl_afu_dma_region *region)
+{
+ struct dfl_afu *afu;
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+ rb_erase(&region->node, &afu->dma_regions);
+}
+
+/**
+ * afu_dma_region_destroy - destroy all regions in rbtree
+ * @pdata: feature device platform data
+ *
+ * Needs to be called with pdata->lock held.
+ */
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = rb_first(&afu->dma_regions);
+ struct dfl_afu_dma_region *region;
+
+ while (node) {
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+
+ rb_erase(node, &afu->dma_regions);
+
+ if (region->iova)
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length,
+ DMA_BIDIRECTIONAL);
+
+ if (region->pages)
+ afu_dma_unpin_pages(pdata, region);
+
+ node = rb_next(node);
+ kfree(region);
+ }
+}
+
+/**
+ * afu_dma_region_find - find the dma region from rbtree based on iova and size
+ * @pdata: feature device platform data
+ * @iova: address of the dma memory area
+ * @size: size of the dma memory area
+ *
+ * It finds the dma region from the rbtree based on @iova and @size:
+ * - if @size == 0, it finds the dma region which starts from @iova
+ * - otherwise, it finds the dma region which fully contains
+ * [@iova, @iova+size)
+ * If nothing is matched returns NULL.
+ *
+ * Needs to be called with pdata->lock held.
+ */
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct rb_node *node = afu->dma_regions.rb_node;
+ struct device *dev = &pdata->dev->dev;
+
+ while (node) {
+ struct dfl_afu_dma_region *region;
+
+ region = container_of(node, struct dfl_afu_dma_region, node);
+
+ if (dma_region_check_iova(region, iova, size)) {
+ dev_dbg(dev, "find region (iova = %llx)\n",
+ (unsigned long long)region->iova);
+ return region;
+ }
+
+ if (iova < region->iova)
+ node = node->rb_left;
+ else if (iova > region->iova)
+ node = node->rb_right;
+ else
+ /* the iova region is not fully covered. */
+ break;
+ }
+
+ dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
+ (unsigned long long)iova, (unsigned long long)size);
+
+ return NULL;
+}
+
+/**
+ * afu_dma_region_find_iova - find the dma region from rbtree by iova
+ * @pdata: feature device platform data
+ * @iova: address of the dma region
+ *
+ * Needs to be called with pdata->lock held.
+ */
+static struct dfl_afu_dma_region *
+afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ return afu_dma_region_find(pdata, iova, 0);
+}
+
+/**
+ * afu_dma_map_region - map memory region for dma
+ * @pdata: feature device platform data
+ * @user_addr: address of the memory region
+ * @length: size of the memory region
+ * @iova: pointer of iova address
+ *
+ * Map memory region defined by @user_addr and @length, and return dma address
+ * of the memory region via @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova)
+{
+ struct dfl_afu_dma_region *region;
+ int ret;
+
+ /*
+ * Check Inputs, only accept page-aligned user memory region with
+ * valid length.
+ */
+ if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
+ return -EINVAL;
+
+ /* Check overflow */
+ if (user_addr + length < user_addr)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr,
+ length))
+ return -EINVAL;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->user_addr = user_addr;
+ region->length = length;
+
+ /* Pin the user memory region */
+ ret = afu_dma_pin_pages(pdata, region);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ goto free_region;
+ }
+
+ /* Only accept continuous pages, return an error otherwise */
+ if (!afu_dma_check_continuous_pages(region)) {
+ dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ ret = -EINVAL;
+ goto unpin_pages;
+ }
+
+ /* As pages are continuous then start to do DMA mapping */
+ region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->pages[0], 0,
+ region->length,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
+ dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ *iova = region->iova;
+
+ mutex_lock(&pdata->lock);
+ ret = afu_dma_region_add(pdata, region);
+ mutex_unlock(&pdata->lock);
+ if (ret) {
+ dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ goto unmap_dma;
+ }
+
+ return 0;
+
+unmap_dma:
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+unpin_pages:
+ afu_dma_unpin_pages(pdata, region);
+free_region:
+ kfree(region);
+ return ret;
+}
+
+/**
+ * afu_dma_unmap_region - unmap dma memory region
+ * @pdata: feature device platform data
+ * @iova: dma address of the region
+ *
+ * Unmap dma memory region based on @iova.
+ * Return 0 for success, otherwise error code.
+ */
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+{
+ struct dfl_afu_dma_region *region;
+
+ mutex_lock(&pdata->lock);
+ region = afu_dma_region_find_iova(pdata, iova);
+ if (!region) {
+ mutex_unlock(&pdata->lock);
+ return -EINVAL;
+ }
+
+ if (region->in_use) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ afu_dma_region_remove(pdata, region);
+ mutex_unlock(&pdata->lock);
+
+ dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova, region->length, DMA_BIDIRECTIONAL);
+ afu_dma_unpin_pages(pdata, region);
+ kfree(region);
+
+ return 0;
+}
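
The new dfl-afu-dma-region.c above backs the port's DMA-map ioctls: it pins a page-aligned user buffer, requires physically continuous pages, maps the region with dma_map_page(), and tracks it in an rbtree keyed by iova. A hedged userspace sketch of exercising that path through the ioctls this series adds in <linux/fpga-dfl.h> (the device node name is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	struct dfl_fpga_port_dma_map map;
	void *buf = NULL;
	int fd;

	fd = open("/dev/dfl-port.0", O_RDWR);	/* illustrative node name */
	if (fd < 0)
		return 1;

	/* afu_dma_map_region() rejects regions that are not page-aligned */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.user_addr = (uintptr_t)buf;
	map.length = 4096;

	if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map) == 0)
		printf("mapped at iova 0x%llx\n",
		       (unsigned long long)map.iova);

	close(fd);
	return 0;
}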
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
new file mode 100644
index 000000000000..02baa6a227c0
--- /dev/null
+++ b/drivers/fpga/dfl-afu-main.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl-afu.h"
+
+/**
+ * port_enable - enable a port
+ * @pdev: port platform device.
+ *
+ * Enable Port by clearing the port soft reset bit, which is set by default.
+ * The AFU is unable to respond to any MMIO access while in reset.
+ * port_enable function should only be used after port_disable function.
+ */
+static void port_enable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ WARN_ON(!pdata->disable_count);
+
+ if (--pdata->disable_count != 0)
+ return;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Clear port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v &= ~PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+/**
+ * port_disable - disable a port
+ * @pdev: port platform device.
+ *
+ * Disable Port by setting the port soft reset bit; this puts the port into
+ * reset.
+ */
+static int port_disable(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __iomem *base;
+ u64 v;
+
+ if (pdata->disable_count++ != 0)
+ return 0;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ /* Set port soft reset */
+ v = readq(base + PORT_HDR_CTRL);
+ v |= PORT_CTRL_SFTRST;
+ writeq(v, base + PORT_HDR_CTRL);
+
+ /*
+ * HW sets ack bit to 1 when all outstanding requests have been drained
+ * on this port and minimum soft reset pulse width has elapsed.
+ * Driver polls port_soft_reset_ack to determine if reset done by HW.
+ */
+ if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
+ RST_POLL_INVL, RST_POLL_TIMEOUT)) {
+ dev_err(&pdev->dev, "timeout, fail to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * This function resets the FPGA Port and its accelerator (AFU) by function
+ * __port_disable and __port_enable (set port soft reset bit and then clear
+ * it). Userspace can do Port reset at any time, e.g. during DMA or Partial
+ * Reconfiguration. But it should never cause any system level issue, only
+ * functional failure (e.g. DMA or PR operation failure) and be recoverable
+ * from the failure.
+ *
+ * Note: the accelerator (AFU) is not accessible when its port is in reset
+ * (disabled). Any attempt at MMIO access to the AFU while in reset will
+ * result in errors reported via the port error reporting sub feature (if
+ * present).
+ */
+static int __port_reset(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = port_disable(pdev);
+ if (!ret)
+ port_enable(pdev);
+
+ return ret;
+}
+
+static int port_reset(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ mutex_lock(&pdata->lock);
+ ret = __port_reset(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static int port_get_id(struct platform_device *pdev)
+{
+ void __iomem *base;
+
+ base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+
+ return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
+}
+
+static ssize_t
+id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ int id = port_get_id(to_platform_device(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", id);
+}
+static DEVICE_ATTR_RO(id);
+
+static const struct attribute *port_hdr_attrs[] = {
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static int port_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT HDR Init.\n");
+
+ port_reset(pdev);
+
+ return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
+}
+
+static void port_hdr_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT HDR UInit.\n");
+
+ sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
+}
+
+static long
+port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_PORT_RESET:
+ if (!arg)
+ ret = port_reset(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static const struct dfl_feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .ioctl = port_hdr_ioctl,
+};
+
+static ssize_t
+afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ void __iomem *base;
+ u64 guidl, guidh;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+
+ mutex_lock(&pdata->lock);
+ if (pdata->disable_count) {
+ mutex_unlock(&pdata->lock);
+ return -EBUSY;
+ }
+
+ guidl = readq(base + GUID_L);
+ guidh = readq(base + GUID_H);
+ mutex_unlock(&pdata->lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
+}
+static DEVICE_ATTR_RO(afu_id);
+
+static const struct attribute *port_afu_attrs[] = {
+ &dev_attr_afu_id.attr,
+ NULL
+};
+
+static int port_afu_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct resource *res = &pdev->resource[feature->resource_index];
+ int ret;
+
+ dev_dbg(&pdev->dev, "PORT AFU Init.\n");
+
+ ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ DFL_PORT_REGION_INDEX_AFU, resource_size(res),
+ res->start, DFL_PORT_REGION_READ |
+ DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
+ if (ret)
+ return ret;
+
+ return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
+}
+
+static void port_afu_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "PORT AFU UInit.\n");
+
+ sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
+}
+
+static const struct dfl_feature_ops port_afu_ops = {
+ .init = port_afu_init,
+ .uinit = port_afu_uinit,
+};
+
+static struct dfl_feature_driver port_feature_drvs[] = {
+ {
+ .id = PORT_FEATURE_ID_HEADER,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .id = PORT_FEATURE_ID_AFU,
+ .ops = &port_afu_ops,
+ },
+ {
+ .ops = NULL,
+ }
+};
+
+static int afu_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata;
+ int ret;
+
+ pdata = dev_get_platdata(&fdev->dev);
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ ret = dfl_feature_dev_use_begin(pdata);
+ if (ret)
+ return ret;
+
+ dev_dbg(&fdev->dev, "Device File Open\n");
+ filp->private_data = fdev;
+
+ return 0;
+}
+
+static int afu_release(struct inode *inode, struct file *filp)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ __port_reset(pdev);
+ afu_dma_region_destroy(pdata);
+ mutex_unlock(&pdata->lock);
+
+ dfl_feature_dev_use_end(pdata);
+
+ return 0;
+}
+
+static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static long
+afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_info info;
+ struct dfl_afu *afu;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);
+
+ if (copy_from_user(&info, arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ info.flags = 0;
+ info.num_regions = afu->num_regions;
+ info.num_umsgs = afu->num_umsgs;
+ mutex_unlock(&pdata->lock);
+
+ if (copy_to_user(arg, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
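+
+/*
+ * A minimal userspace sketch of the query above (struct and ioctl names
+ * come from <linux/fpga-dfl.h>; "fd" is assumed to be an opened port
+ * device):
+ *
+ *	struct dfl_fpga_port_info info = { .argsz = sizeof(info) };
+ *
+ *	if (!ioctl(fd, DFL_FPGA_PORT_GET_INFO, &info))
+ *		... the port exposes info.num_regions MMIO regions ...
+ */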
+
+static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+ void __user *arg)
+{
+ struct dfl_fpga_port_region_info rinfo;
+ struct dfl_afu_mmio_region region;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_region_info, offset);
+
+ if (copy_from_user(&rinfo, arg, minsz))
+ return -EFAULT;
+
+ if (rinfo.argsz < minsz || rinfo.padding)
+ return -EINVAL;
+
+ ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ if (ret)
+ return ret;
+
+ rinfo.flags = region.flags;
+ rinfo.size = region.size;
+ rinfo.offset = region.offset;
+
+ if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_map map;
+ unsigned long minsz;
+ long ret;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
+
+ if (copy_from_user(&map, arg, minsz))
+ return -EFAULT;
+
+ if (map.argsz < minsz || map.flags)
+ return -EINVAL;
+
+ ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(arg, &map, sizeof(map))) {
+ afu_dma_unmap_region(pdata, map.iova);
+ return -EFAULT;
+ }
+
+ dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ (unsigned long long)map.user_addr,
+ (unsigned long long)map.length,
+ (unsigned long long)map.iova);
+
+ return 0;
+}
+
+static long
+afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+{
+ struct dfl_fpga_port_dma_unmap unmap;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
+
+ if (copy_from_user(&unmap, arg, minsz))
+ return -EFAULT;
+
+ if (unmap.argsz < minsz || unmap.flags)
+ return -EINVAL;
+
+ return afu_dma_unmap_region(pdata, unmap.iova);
+}
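+
+/*
+ * A hedged userspace sketch of the DMA map/unmap flow above (struct and
+ * ioctl names come from <linux/fpga-dfl.h>; "fd", "buf" and "size" are
+ * assumed to be caller supplied):
+ *
+ *	struct dfl_fpga_port_dma_map map = {
+ *		.argsz = sizeof(map),
+ *		.user_addr = (__u64)(uintptr_t)buf,
+ *		.length = size,
+ *	};
+ *
+ *	if (!ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map))
+ *		... program map.iova into the AFU for DMA ...
+ *
+ *	struct dfl_fpga_port_dma_unmap unmap = {
+ *		.argsz = sizeof(unmap),
+ *		.iova = map.iova,
+ *	};
+ *
+ *	ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
+ */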
+
+static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return afu_ioctl_check_extension(pdata, arg);
+ case DFL_FPGA_PORT_GET_INFO:
+ return afu_ioctl_get_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_GET_REGION_INFO:
+ return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_MAP:
+ return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ case DFL_FPGA_PORT_DMA_UNMAP:
+ return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ default:
+		/*
+		 * Let the sub-feature's ioctl function handle the cmd. A
+		 * sub-feature's ioctl returns -ENODEV when the cmd is not
+		 * handled by that sub-feature, and returns 0 or another
+		 * error code when the cmd is handled.
+		 */
+ dfl_fpga_dev_for_each_feature(pdata, f)
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct platform_device *pdev = filp->private_data;
+ struct dfl_feature_platform_data *pdata;
+ u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_afu_mmio_region region;
+ u64 offset;
+ int ret;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ if (ret)
+ return ret;
+
+ if (!(region.flags & DFL_PORT_REGION_MMAP))
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
+ return -EPERM;
+
+ if ((vma->vm_flags & VM_WRITE) &&
+ !(region.flags & DFL_PORT_REGION_WRITE))
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ return remap_pfn_range(vma, vma->vm_start,
+ (region.phys + (offset - region.offset)) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
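+
+/*
+ * A minimal userspace sketch of mapping an MMIO region (the offset and
+ * size come from DFL_FPGA_PORT_GET_REGION_INFO; struct and ioctl names
+ * are from <linux/fpga-dfl.h>, "fd" is assumed to be an opened port
+ * device):
+ *
+ *	struct dfl_fpga_port_region_info rinfo = {
+ *		.argsz = sizeof(rinfo),
+ *		.index = DFL_PORT_REGION_INDEX_AFU,
+ *	};
+ *
+ *	if (!ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo))
+ *		mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
+ *			    MAP_SHARED, fd, rinfo.offset);
+ */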
+
+static const struct file_operations afu_fops = {
+ .owner = THIS_MODULE,
+ .open = afu_open,
+ .release = afu_release,
+ .unlocked_ioctl = afu_ioctl,
+ .mmap = afu_mmap,
+};
+
+static int afu_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_afu *afu;
+
+ afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
+ if (!afu)
+ return -ENOMEM;
+
+ afu->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, afu);
+ afu_mmio_region_init(pdata);
+ afu_dma_region_init(pdata);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int afu_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_afu *afu;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ afu_mmio_region_destroy(pdata);
+ afu_dma_region_destroy(pdata);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static int port_enable_set(struct platform_device *pdev, bool enable)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ if (enable)
+ port_enable(pdev);
+ else
+ ret = port_disable(pdev);
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static struct dfl_fpga_port_ops afu_port_ops = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .owner = THIS_MODULE,
+ .get_id = port_get_id,
+ .enable_set = port_enable_set,
+};
+
+static int afu_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ ret = afu_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
+ if (ret) {
+ dfl_fpga_dev_feature_uinit(pdev);
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ afu_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int afu_remove(struct platform_device *pdev)
+{
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ afu_dev_destroy(pdev);
+
+ return 0;
+}
+
+static struct platform_driver afu_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
+ },
+ .probe = afu_probe,
+ .remove = afu_remove,
+};
+
+static int __init afu_init(void)
+{
+ int ret;
+
+ dfl_fpga_port_ops_add(&afu_port_ops);
+
+ ret = platform_driver_register(&afu_driver);
+ if (ret)
+ dfl_fpga_port_ops_del(&afu_port_ops);
+
+ return ret;
+}
+
+static void __exit afu_exit(void)
+{
+ platform_driver_unregister(&afu_driver);
+
+ dfl_fpga_port_ops_del(&afu_port_ops);
+}
+
+module_init(afu_init);
+module_exit(afu_exit);
+
+MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-port");
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
new file mode 100644
index 000000000000..0804b7a0c298
--- /dev/null
+++ b/drivers/fpga/dfl-afu-region.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Accelerated Function Unit (AFU) MMIO Region Management
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include "dfl-afu.h"
+
+/**
+ * afu_mmio_region_init - init function for afu mmio region support
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+
+ INIT_LIST_HEAD(&afu->regions);
+}
+
+#define for_each_region(region, afu) \
+ list_for_each_entry((region), &(afu)->regions, node)
+
+static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
+ u32 region_index)
+{
+ struct dfl_afu_mmio_region *region;
+
+ for_each_region(region, afu)
+ if (region->index == region_index)
+ return region;
+
+ return NULL;
+}
+
+/**
+ * afu_mmio_region_add - add an mmio region to the given feature dev.
+ *
+ * @pdata: afu platform device's pdata.
+ * @region_index: region index.
+ * @region_size: region size.
+ * @phys: region's physical address.
+ * @flags: region flags (access permission).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->index = region_index;
+ region->size = region_size;
+ region->phys = phys;
+ region->flags = flags;
+
+ mutex_lock(&pdata->lock);
+
+ afu = dfl_fpga_pdata_get_private(pdata);
+
+	/* check if @region_index already exists */
+ if (get_region_by_index(afu, region_index)) {
+ mutex_unlock(&pdata->lock);
+ ret = -EEXIST;
+ goto exit;
+ }
+
+ region_size = PAGE_ALIGN(region_size);
+ region->offset = afu->region_cur_offset;
+ list_add(&region->node, &afu->regions);
+
+ afu->region_cur_offset += region_size;
+ afu->num_regions++;
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+exit:
+ devm_kfree(&pdata->dev->dev, region);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
+ * @pdata: afu platform device's pdata.
+ */
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu_mmio_region *tmp, *region;
+
+ list_for_each_entry_safe(region, tmp, &afu->regions, node)
+ devm_kfree(&pdata->dev->dev, region);
+}
+
+/**
+ * afu_mmio_region_get_by_index - find an afu region by index.
+ * @pdata: afu platform device's pdata.
+ * @region_index: region index.
+ * @pregion: ptr to region for result.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ region = get_region_by_index(afu, region_index);
+ if (!region) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ *pregion = *region;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+/**
+ * afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
+ *
+ * @pdata: afu platform device's pdata.
+ * @offset: region offset from start of the device fd.
+ * @size: region size.
+ * @pregion: ptr to region for result.
+ *
+ * Find the region which fully contains the region described by input
+ * parameters (offset and size) from the feature dev's region linked list.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion)
+{
+ struct dfl_afu_mmio_region *region;
+ struct dfl_afu *afu;
+ int ret = 0;
+
+ mutex_lock(&pdata->lock);
+ afu = dfl_fpga_pdata_get_private(pdata);
+ for_each_region(region, afu)
+ if (region->offset <= offset &&
+ region->offset + region->size >= offset + size) {
+ *pregion = *region;
+ goto exit;
+ }
+ ret = -EINVAL;
+exit:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
new file mode 100644
index 000000000000..0c7630ae3cda
--- /dev/null
+++ b/drivers/fpga/dfl-afu.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Accelerated Function Unit (AFU) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_AFU_H
+#define __DFL_AFU_H
+
+#include <linux/mm.h>
+
+#include "dfl.h"
+
+/**
+ * struct dfl_afu_mmio_region - afu mmio region data structure
+ *
+ * @index: region index.
+ * @flags: region flags (access permission).
+ * @size: region size.
+ * @offset: region offset from start of the device fd.
+ * @phys: region's physical address.
+ * @node: node to add to afu feature dev's region list.
+ */
+struct dfl_afu_mmio_region {
+ u32 index;
+ u32 flags;
+ u64 size;
+ u64 offset;
+ u64 phys;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_afu_dma_region - afu DMA region data structure
+ *
+ * @user_addr: region userspace virtual address.
+ * @length: region length.
+ * @iova: region IO virtual address.
+ * @pages: ptr to pages of this region.
+ * @node: rb tree node.
+ * @in_use: flag to indicate if this region is in use.
+ */
+struct dfl_afu_dma_region {
+ u64 user_addr;
+ u64 length;
+ u64 iova;
+ struct page **pages;
+ struct rb_node node;
+ bool in_use;
+};
+
+/**
+ * struct dfl_afu - afu device data structure
+ *
+ * @region_cur_offset: current region offset from start of the device fd.
+ * @num_regions: num of mmio regions.
+ * @regions: the mmio region linked list of this afu feature device.
+ * @dma_regions: root of dma regions rb tree.
+ * @num_umsgs: num of umsgs.
+ * @pdata: afu platform device's pdata.
+ */
+struct dfl_afu {
+ u64 region_cur_offset;
+ int num_regions;
+ u8 num_umsgs;
+ struct list_head regions;
+ struct rb_root dma_regions;
+
+ struct dfl_feature_platform_data *pdata;
+};
+
+void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+ u32 region_index, u64 region_size, u64 phys, u32 flags);
+void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+ u32 region_index,
+ struct dfl_afu_mmio_region *pregion);
+int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+ u64 offset, u64 size,
+ struct dfl_afu_mmio_region *pregion);
+void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
+void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
+int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+ u64 user_addr, u64 length, u64 *iova);
+int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+struct dfl_afu_dma_region *
+afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+ u64 iova, u64 size);
+#endif /* __DFL_AFU_H */
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
new file mode 100644
index 000000000000..7cc041def8b3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-br.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Bridge Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#include "dfl.h"
+#include "dfl-fme-pr.h"
+
+struct fme_br_priv {
+ struct dfl_fme_br_pdata *pdata;
+ struct dfl_fpga_port_ops *port_ops;
+ struct platform_device *port_pdev;
+};
+
+static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct fme_br_priv *priv = bridge->priv;
+ struct platform_device *port_pdev;
+ struct dfl_fpga_port_ops *ops;
+
+ if (!priv->port_pdev) {
+ port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_pdev)
+ return -ENODEV;
+
+ priv->port_pdev = port_pdev;
+ }
+
+ if (priv->port_pdev && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (!ops || !ops->enable_set)
+ return -ENOENT;
+
+ priv->port_ops = ops;
+ }
+
+ return priv->port_ops->enable_set(priv->port_pdev, enable);
+}
+
+static const struct fpga_bridge_ops fme_bridge_ops = {
+ .enable_set = fme_bridge_enable_set,
+};
+
+static int fme_br_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct fme_br_priv *priv;
+ struct fpga_bridge *br;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdata = dev_get_platdata(dev);
+
+ br = fpga_bridge_create(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
+ if (!br)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, br);
+
+ ret = fpga_bridge_register(br);
+ if (ret)
+ fpga_bridge_free(br);
+
+ return ret;
+}
+
+static int fme_br_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *br = platform_get_drvdata(pdev);
+ struct fme_br_priv *priv = br->priv;
+
+ fpga_bridge_unregister(br);
+
+ if (priv->port_pdev)
+ put_device(&priv->port_pdev->dev);
+ if (priv->port_ops)
+ dfl_fpga_port_ops_put(priv->port_ops);
+
+ return 0;
+}
+
+static struct platform_driver fme_br_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_BRIDGE,
+ },
+ .probe = fme_br_probe,
+ .remove = fme_br_remove,
+};
+
+module_platform_driver(fme_br_driver);
+
+MODULE_DESCRIPTION("FPGA Bridge for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-bridge");
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
new file mode 100644
index 000000000000..086ad2420ade
--- /dev/null
+++ b/drivers/fpga/dfl-fme-main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+
+static ssize_t ports_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_CAP);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
+}
+static DEVICE_ATTR_RO(ports_num);
+
+/*
+ * Bitstream (static FPGA region) identifier number. It contains the
+ * detailed version and other information of this static FPGA region.
+ */
+static ssize_t bitstream_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_ID);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_id);
+
+/*
+ * Bitstream (static FPGA region) meta data. It contains the synthesis
+ * date, seed and other information of this static FPGA region.
+ */
+static ssize_t bitstream_metadata_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ void __iomem *base;
+ u64 v;
+
+ base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+
+ v = readq(base + FME_HDR_BITSTREAM_MD);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
+}
+static DEVICE_ATTR_RO(bitstream_metadata);
+
+static const struct attribute *fme_hdr_attrs[] = {
+ &dev_attr_ports_num.attr,
+ &dev_attr_bitstream_id.attr,
+ &dev_attr_bitstream_metadata.attr,
+ NULL,
+};
+
+static int fme_hdr_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ void __iomem *base = feature->ioaddr;
+
+	dev_dbg(&pdev->dev, "FME HDR Init.\n");
+	dev_dbg(&pdev->dev, "FME cap %llx.\n",
+		(unsigned long long)readq(base + FME_HDR_CAP));
+
+	return sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs);
+}
+
+static void fme_hdr_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ dev_dbg(&pdev->dev, "FME HDR UInit.\n");
+ sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs);
+}
+
+static const struct dfl_feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+};
+
+static struct dfl_feature_driver fme_feature_drvs[] = {
+ {
+ .id = FME_FEATURE_ID_HEADER,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .id = FME_FEATURE_ID_PR_MGMT,
+ .ops = &pr_mgmt_ops,
+ },
+ {
+ .ops = NULL,
+ },
+};
+
+static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+ unsigned long arg)
+{
+ /* No extension support for now */
+ return 0;
+}
+
+static int fme_open(struct inode *inode, struct file *filp)
+{
+ struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ int ret;
+
+ if (WARN_ON(!pdata))
+ return -ENODEV;
+
+ ret = dfl_feature_dev_use_begin(pdata);
+ if (ret)
+ return ret;
+
+ dev_dbg(&fdev->dev, "Device File Open\n");
+ filp->private_data = pdata;
+
+ return 0;
+}
+
+static int fme_release(struct inode *inode, struct file *filp)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+
+ dev_dbg(&pdev->dev, "Device File Release\n");
+ dfl_feature_dev_use_end(pdata);
+
+ return 0;
+}
+
+static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = filp->private_data;
+ struct platform_device *pdev = pdata->dev;
+ struct dfl_feature *f;
+ long ret;
+
+ dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
+
+ switch (cmd) {
+ case DFL_FPGA_GET_API_VERSION:
+ return DFL_FPGA_API_VERSION;
+ case DFL_FPGA_CHECK_EXTENSION:
+ return fme_ioctl_check_extension(pdata, arg);
+ default:
+		/*
+		 * Let the sub-feature's ioctl function handle the cmd. A
+		 * sub-feature's ioctl returns -ENODEV when the cmd is not
+		 * handled by that sub-feature, and returns 0 or another
+		 * error code when the cmd is handled.
+		 */
+ dfl_fpga_dev_for_each_feature(pdata, f) {
+ if (f->ops && f->ops->ioctl) {
+ ret = f->ops->ioctl(pdev, f, cmd, arg);
+ if (ret != -ENODEV)
+ return ret;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int fme_dev_init(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *fme;
+
+ fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
+ if (!fme)
+ return -ENOMEM;
+
+ fme->pdata = pdata;
+
+ mutex_lock(&pdata->lock);
+ dfl_fpga_pdata_set_private(pdata, fme);
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+}
+
+static void fme_dev_destroy(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *fme;
+
+ mutex_lock(&pdata->lock);
+ fme = dfl_fpga_pdata_get_private(pdata);
+ dfl_fpga_pdata_set_private(pdata, NULL);
+ mutex_unlock(&pdata->lock);
+}
+
+static const struct file_operations fme_fops = {
+ .owner = THIS_MODULE,
+ .open = fme_open,
+ .release = fme_release,
+ .unlocked_ioctl = fme_ioctl,
+};
+
+static int fme_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = fme_dev_init(pdev);
+ if (ret)
+ goto exit;
+
+ ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
+ if (ret)
+ goto dev_destroy;
+
+ ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
+ if (ret)
+ goto feature_uinit;
+
+ return 0;
+
+feature_uinit:
+ dfl_fpga_dev_feature_uinit(pdev);
+dev_destroy:
+ fme_dev_destroy(pdev);
+exit:
+ return ret;
+}
+
+static int fme_remove(struct platform_device *pdev)
+{
+ dfl_fpga_dev_ops_unregister(pdev);
+ dfl_fpga_dev_feature_uinit(pdev);
+ fme_dev_destroy(pdev);
+
+ return 0;
+}
+
+static struct platform_driver fme_driver = {
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_FME,
+ },
+ .probe = fme_probe,
+ .remove = fme_remove,
+};
+
+module_platform_driver(fme_driver);
+
+MODULE_DESCRIPTION("FPGA Management Engine driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme");
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
new file mode 100644
index 000000000000..b5ef405b6d88
--- /dev/null
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Manager Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#include "dfl-fme-pr.h"
+
+/* FME Partial Reconfiguration Sub Feature Register Set */
+#define FME_PR_DFH 0x0
+#define FME_PR_CTRL 0x8
+#define FME_PR_STS 0x10
+#define FME_PR_DATA 0x18
+#define FME_PR_ERR 0x20
+#define FME_PR_INTFC_ID_H 0xA8
+#define FME_PR_INTFC_ID_L 0xB0
+
+/* FME PR Control Register Bitfield */
+#define FME_PR_CTRL_PR_RST BIT_ULL(0) /* Reset PR engine */
+#define FME_PR_CTRL_PR_RSTACK BIT_ULL(4) /* Ack for PR engine reset */
+#define FME_PR_CTRL_PR_RGN_ID GENMASK_ULL(9, 7) /* PR Region ID */
+#define FME_PR_CTRL_PR_START BIT_ULL(12) /* Start to request PR service */
+#define FME_PR_CTRL_PR_COMPLETE BIT_ULL(13) /* PR data push completion */
+
+/* FME PR Status Register Bitfield */
+/* Number of available entries in HW queue inside the PR engine. */
+#define FME_PR_STS_PR_CREDIT GENMASK_ULL(8, 0)
+#define FME_PR_STS_PR_STS BIT_ULL(16) /* PR operation status */
+#define FME_PR_STS_PR_STS_IDLE 0
+#define FME_PR_STS_PR_CTRLR_STS GENMASK_ULL(22, 20) /* Controller status */
+#define FME_PR_STS_PR_HOST_STS GENMASK_ULL(27, 24) /* PR host status */
+
+/* FME PR Data Register Bitfield */
+/* PR data from the raw-binary file. */
+#define FME_PR_DATA_PR_DATA_RAW	GENMASK_ULL(31, 0)
+
+/* FME PR Error Register */
+/* PR Operation errors detected. */
+#define FME_PR_ERR_OPERATION_ERR BIT_ULL(0)
+/* CRC error detected. */
+#define FME_PR_ERR_CRC_ERR BIT_ULL(1)
+/* Incompatible PR bitstream detected. */
+#define FME_PR_ERR_INCOMPATIBLE_BS BIT_ULL(2)
+/* PR data push protocol violated. */
+#define FME_PR_ERR_PROTOCOL_ERR BIT_ULL(3)
+/* PR data fifo overflow error detected */
+#define FME_PR_ERR_FIFO_OVERFLOW BIT_ULL(4)
+
+#define PR_WAIT_TIMEOUT 8000000
+#define PR_HOST_STATUS_IDLE 0
+
+struct fme_mgr_priv {
+ void __iomem *ioaddr;
+ u64 pr_error;
+};
+
+static u64 pr_error_to_mgr_status(u64 err)
+{
+ u64 status = 0;
+
+ if (err & FME_PR_ERR_OPERATION_ERR)
+ status |= FPGA_MGR_STATUS_OPERATION_ERR;
+ if (err & FME_PR_ERR_CRC_ERR)
+ status |= FPGA_MGR_STATUS_CRC_ERR;
+ if (err & FME_PR_ERR_INCOMPATIBLE_BS)
+ status |= FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR;
+ if (err & FME_PR_ERR_PROTOCOL_ERR)
+ status |= FPGA_MGR_STATUS_IP_PROTOCOL_ERR;
+ if (err & FME_PR_ERR_FIFO_OVERFLOW)
+ status |= FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR;
+
+ return status;
+}
+
+static u64 fme_mgr_pr_error_handle(void __iomem *fme_pr)
+{
+ u64 pr_status, pr_error;
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ if (!(pr_status & FME_PR_STS_PR_STS))
+ return 0;
+
+ pr_error = readq(fme_pr + FME_PR_ERR);
+ writeq(pr_error, fme_pr + FME_PR_ERR);
+
+ return pr_error;
+}
+
+static int fme_mgr_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status;
+
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(dev, "only supports partial reconfiguration.\n");
+ return -EINVAL;
+ }
+
+	dev_dbg(dev, "resetting PR engine before initiating PR\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ pr_ctrl & FME_PR_CTRL_PR_RSTACK, 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Reset ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RST;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev,
+ "waiting for PR resource in HW to be initialized and ready\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_STS, pr_status,
+ (pr_status & FME_PR_STS_PR_STS) ==
+ FME_PR_STS_PR_STS_IDLE, 1, PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Status timeout\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "check and clear previous PR error\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error)
+ dev_dbg(dev, "previous PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+
+ dev_dbg(dev, "set PR port ID\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl &= ~FME_PR_CTRL_PR_RGN_ID;
+ pr_ctrl |= FIELD_PREP(FME_PR_CTRL_PR_RGN_ID, info->region_id);
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ return 0;
+}
+
+static int fme_mgr_write(struct fpga_manager *mgr,
+ const char *buf, size_t count)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl, pr_status, pr_data;
+ int delay = 0, pr_credit, i = 0;
+
+ dev_dbg(dev, "start request\n");
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_START;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "pushing data from bitstream to HW\n");
+
+ /*
+	 * The driver can push data to the PR hardware via the PR_DATA
+	 * register once HW has enough pr_credit (> 1); pr_credit is reduced
+	 * by one for each 32-bit word of PR data written to PR_DATA. If
+	 * pr_credit <= 1, the driver must poll until hardware accumulates
+	 * enough pr_credit.
+ */
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+
+ while (count > 0) {
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(dev, "PR_CREDIT timeout\n");
+ return -ETIMEDOUT;
+ }
+ udelay(1);
+
+ pr_status = readq(fme_pr + FME_PR_STS);
+ pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
+ }
+
+ if (count < 4) {
+			dev_err(dev, "Invalid PR bitstream size\n");
+ return -EINVAL;
+ }
+
+ pr_data = 0;
+ pr_data |= FIELD_PREP(FME_PR_DATA_PR_DATA_RAW,
+ *(((u32 *)buf) + i));
+ writeq(pr_data, fme_pr + FME_PR_DATA);
+ count -= 4;
+ pr_credit--;
+ i++;
+ }
+
+ return 0;
+}
+
+static int fme_mgr_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct device *dev = &mgr->dev;
+ struct fme_mgr_priv *priv = mgr->priv;
+ void __iomem *fme_pr = priv->ioaddr;
+ u64 pr_ctrl;
+
+ pr_ctrl = readq(fme_pr + FME_PR_CTRL);
+ pr_ctrl |= FME_PR_CTRL_PR_COMPLETE;
+ writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
+
+ dev_dbg(dev, "green bitstream push complete\n");
+ dev_dbg(dev, "waiting for HW to release PR resource\n");
+
+ if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
+ !(pr_ctrl & FME_PR_CTRL_PR_START), 1,
+ PR_WAIT_TIMEOUT)) {
+ dev_err(dev, "PR Completion ACK timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "PR operation complete, checking status\n");
+ priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
+ if (priv->pr_error) {
+ dev_dbg(dev, "PR error detected %llx\n",
+ (unsigned long long)priv->pr_error);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "PR done successfully\n");
+
+ return 0;
+}
+
+static enum fpga_mgr_states fme_mgr_state(struct fpga_manager *mgr)
+{
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static u64 fme_mgr_status(struct fpga_manager *mgr)
+{
+ struct fme_mgr_priv *priv = mgr->priv;
+
+ return pr_error_to_mgr_status(priv->pr_error);
+}
+
+static const struct fpga_manager_ops fme_mgr_ops = {
+ .write_init = fme_mgr_write_init,
+ .write = fme_mgr_write,
+ .write_complete = fme_mgr_write_complete,
+ .state = fme_mgr_state,
+ .status = fme_mgr_status,
+};
+
+static void fme_mgr_get_compat_id(void __iomem *fme_pr,
+ struct fpga_compat_id *id)
+{
+ id->id_l = readq(fme_pr + FME_PR_INTFC_ID_L);
+ id->id_h = readq(fme_pr + FME_PR_INTFC_ID_H);
+}
+
+static int fme_mgr_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fpga_compat_id *compat_id;
+ struct device *dev = &pdev->dev;
+ struct fme_mgr_priv *priv;
+ struct fpga_manager *mgr;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (pdata->ioaddr)
+ priv->ioaddr = pdata->ioaddr;
+
+ if (!priv->ioaddr) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->ioaddr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ioaddr))
+ return PTR_ERR(priv->ioaddr);
+ }
+
+ compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL);
+ if (!compat_id)
+ return -ENOMEM;
+
+ fme_mgr_get_compat_id(priv->ioaddr, compat_id);
+
+ mgr = fpga_mgr_create(dev, "DFL FME FPGA Manager",
+ &fme_mgr_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->compat_id = compat_id;
+ platform_set_drvdata(pdev, mgr);
+
+ ret = fpga_mgr_register(mgr);
+ if (ret)
+ fpga_mgr_free(mgr);
+
+ return ret;
+}
+
+static int fme_mgr_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+
+ return 0;
+}
+
+static struct platform_driver fme_mgr_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_MGR,
+ },
+ .probe = fme_mgr_probe,
+ .remove = fme_mgr_remove,
+};
+
+module_platform_driver(fme_mgr_driver);
+
+MODULE_DESCRIPTION("FPGA Manager for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-mgr");
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
new file mode 100644
index 000000000000..fc9fd2d0482f
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Partial Reconfiguration
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Christopher Rauer <christopher.rauer@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/fpga-dfl.h>
+
+#include "dfl.h"
+#include "dfl-fme.h"
+#include "dfl-fme-pr.h"
+
+static struct dfl_fme_region *
+dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+
+ list_for_each_entry(fme_region, &fme->region_list, node)
+ if (fme_region->port_id == port_id)
+ return fme_region;
+
+ return NULL;
+}
+
+static int dfl_fme_region_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
+{
+ struct dfl_fme_region *fme_region;
+ struct fpga_region *region;
+
+ fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
+ if (!fme_region)
+ return NULL;
+
+ region = fpga_region_class_find(NULL, &fme_region->region->dev,
+ dfl_fme_region_match);
+ if (!region)
+ return NULL;
+
+ return region;
+}
+
+static int fme_pr(struct platform_device *pdev, unsigned long arg)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ void __user *argp = (void __user *)arg;
+ struct dfl_fpga_fme_port_pr port_pr;
+ struct fpga_image_info *info;
+ struct fpga_region *region;
+ void __iomem *fme_hdr;
+ struct dfl_fme *fme;
+ unsigned long minsz;
+ void *buf = NULL;
+ int ret = 0;
+ u64 v;
+
+ minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
+
+ if (copy_from_user(&port_pr, argp, minsz))
+ return -EFAULT;
+
+ if (port_pr.argsz < minsz || port_pr.flags)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(port_pr.buffer_size, 4))
+ return -EINVAL;
+
+ /* get fme header region */
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ /* check port id */
+ v = readq(fme_hdr + FME_HDR_CAP);
+ if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
+		dev_dbg(&pdev->dev, "port_id exceeds the maximum port number\n");
+ return -EINVAL;
+ }
+
+ if (!access_ok(VERIFY_READ,
+ (void __user *)(unsigned long)port_pr.buffer_address,
+ port_pr.buffer_size))
+ return -EFAULT;
+
+ buf = vmalloc(port_pr.buffer_size);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf,
+ (void __user *)(unsigned long)port_pr.buffer_address,
+ port_pr.buffer_size)) {
+ ret = -EFAULT;
+ goto free_exit;
+ }
+
+ /* prepare fpga_image_info for PR */
+ info = fpga_image_info_alloc(&pdev->dev);
+ if (!info) {
+ ret = -ENOMEM;
+ goto free_exit;
+ }
+
+ info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+ mutex_lock(&pdata->lock);
+ fme = dfl_fpga_pdata_get_private(pdata);
+ /* fme device has been unregistered. */
+ if (!fme) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ region = dfl_fme_region_find(fme, port_pr.port_id);
+ if (!region) {
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ fpga_image_info_free(region->info);
+
+ info->buf = buf;
+ info->count = port_pr.buffer_size;
+ info->region_id = port_pr.port_id;
+ region->info = info;
+
+ ret = fpga_region_program_fpga(region);
+
+ /*
+	 * This allows userspace to reset the PR region's logic by disabling
+	 * and reenabling the bridge to clear things out between acceleration
+	 * runs, so there is no need to hold the bridges after partial
+	 * reconfiguration.
+ */
+ if (region->get_bridges)
+ fpga_bridges_put(&region->bridge_list);
+
+ put_device(&region->dev);
+unlock_exit:
+ mutex_unlock(&pdata->lock);
+free_exit:
+ vfree(buf);
+ if (copy_to_user((void __user *)arg, &port_pr, minsz))
+ return -EFAULT;
+
+ return ret;
+}
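+
+/*
+ * A hedged userspace sketch of requesting partial reconfiguration via the
+ * ioctl above (struct and ioctl names come from <linux/fpga-dfl.h>; "fd"
+ * is assumed to be an opened FME device, "bs_buf"/"bs_len" the caller's
+ * bitstream buffer):
+ *
+ *	struct dfl_fpga_fme_port_pr port_pr = {
+ *		.argsz = sizeof(port_pr),
+ *		.port_id = 0,
+ *		.buffer_size = bs_len,
+ *		.buffer_address = (__u64)(uintptr_t)bs_buf,
+ *	};
+ *
+ *	ioctl(fd, DFL_FPGA_FME_PORT_PR, &port_pr);
+ *
+ * Note buffer_size must be 4 byte aligned, per the IS_ALIGNED() check
+ * above.
+ */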
+
+/**
+ * dfl_fme_create_mgr - create fpga mgr platform device as child device
+ *
+ * @pdata: fme platform_device's pdata
+ * @feature: the PR management sub feature, which provides the manager's
+ *           mapped register base (ioaddr)
+ *
+ * Return: mgr platform device if successful, and error code otherwise.
+ */
+static struct platform_device *
+dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature)
+{
+ struct platform_device *mgr, *fme = pdata->dev;
+ struct dfl_fme_mgr_pdata mgr_pdata;
+ int ret = -ENOMEM;
+
+ if (!feature->ioaddr)
+ return ERR_PTR(-ENODEV);
+
+ mgr_pdata.ioaddr = feature->ioaddr;
+
+ /*
+ * Each FME has only one fpga-mgr, so allocate platform device using
+ * the same FME platform device id.
+ */
+ mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
+ if (!mgr)
+ return ERR_PTR(ret);
+
+ mgr->dev.parent = &fme->dev;
+
+ ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
+ if (ret)
+ goto create_mgr_err;
+
+ ret = platform_device_add(mgr);
+ if (ret)
+ goto create_mgr_err;
+
+ return mgr;
+
+create_mgr_err:
+ platform_device_put(mgr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_mgr - destroy fpga mgr platform device
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+
+ platform_device_unregister(priv->mgr);
+}
+
+/**
+ * dfl_fme_create_bridge - create fme fpga bridge platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @port_id: port id for the bridge to be created.
+ *
+ * Return: bridge platform device if successful, and error code otherwise.
+ */
+static struct dfl_fme_bridge *
+dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+{
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_br_pdata br_pdata;
+ struct dfl_fme_bridge *fme_br;
+ int ret = -ENOMEM;
+
+ fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
+ if (!fme_br)
+ return ERR_PTR(ret);
+
+ br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.port_id = port_id;
+
+ fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
+ PLATFORM_DEVID_AUTO);
+ if (!fme_br->br)
+ return ERR_PTR(ret);
+
+ fme_br->br->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
+ if (ret)
+ goto create_br_err;
+
+ ret = platform_device_add(fme_br->br);
+ if (ret)
+ goto create_br_err;
+
+ return fme_br;
+
+create_br_err:
+ platform_device_put(fme_br->br);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_bridge - destroy fpga bridge platform device
+ * @fme_br: fme bridge to destroy
+ */
+static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
+{
+ platform_device_unregister(fme_br->br);
+}
+
+/**
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_bridge *fbridge, *tmp;
+
+ list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
+ list_del(&fbridge->node);
+ dfl_fme_destroy_bridge(fbridge);
+ }
+}
+
+/**
+ * dfl_fme_create_region - create fpga region platform device as child
+ *
+ * @pdata: fme platform device's pdata
+ * @mgr: mgr platform device needed for region
+ * @br: br platform device needed for region
+ * @port_id: port id
+ *
+ * Return: fme region if successful, and error code otherwise.
+ */
+static struct dfl_fme_region *
+dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+ struct platform_device *mgr,
+ struct platform_device *br, int port_id)
+{
+ struct dfl_fme_region_pdata region_pdata;
+ struct device *dev = &pdata->dev->dev;
+ struct dfl_fme_region *fme_region;
+ int ret = -ENOMEM;
+
+ fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
+ if (!fme_region)
+ return ERR_PTR(ret);
+
+ region_pdata.mgr = mgr;
+ region_pdata.br = br;
+
+ /*
+ * Each FPGA device may have more than one port, so allocate platform
+ * device using the same port platform device id.
+ */
+ fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
+ if (!fme_region->region)
+ return ERR_PTR(ret);
+
+ fme_region->region->dev.parent = dev;
+
+ ret = platform_device_add_data(fme_region->region, &region_pdata,
+ sizeof(region_pdata));
+ if (ret)
+ goto create_region_err;
+
+ ret = platform_device_add(fme_region->region);
+ if (ret)
+ goto create_region_err;
+
+ fme_region->port_id = port_id;
+
+ return fme_region;
+
+create_region_err:
+ platform_device_put(fme_region->region);
+ return ERR_PTR(ret);
+}
+
+/**
+ * dfl_fme_destroy_region - destroy fme region
+ * @fme_region: fme region to destroy
+ */
+static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
+{
+ platform_device_unregister(fme_region->region);
+}
+
+/**
+ * dfl_fme_destroy_regions - destroy all fme regions
+ * @pdata: fme platform device's pdata
+ */
+static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme_region *fme_region, *tmp;
+
+ list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
+ list_del(&fme_region->node);
+ dfl_fme_destroy_region(fme_region);
+ }
+}
+
+static int pr_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme_region *fme_region;
+ struct dfl_fme_bridge *fme_br;
+ struct platform_device *mgr;
+ struct dfl_fme *priv;
+ void __iomem *fme_hdr;
+ int ret = -ENODEV, i = 0;
+ u64 fme_cap, port_offset;
+
+ fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
+ FME_FEATURE_ID_HEADER);
+
+ mutex_lock(&pdata->lock);
+ priv = dfl_fpga_pdata_get_private(pdata);
+
+ /* Initialize the region and bridge sub device list */
+ INIT_LIST_HEAD(&priv->region_list);
+ INIT_LIST_HEAD(&priv->bridge_list);
+
+ /* Create fpga mgr platform device */
+ mgr = dfl_fme_create_mgr(pdata, feature);
+ if (IS_ERR(mgr)) {
+		dev_err(&pdev->dev, "failed to create fpga mgr pdev\n");
+ goto unlock;
+ }
+
+ priv->mgr = mgr;
+
+ /* Read capability register to check number of regions and bridges */
+ fme_cap = readq(fme_hdr + FME_HDR_CAP);
+ for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
+ port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
+ if (!(port_offset & FME_PORT_OFST_IMP))
+ continue;
+
+ /* Create bridge for each port */
+ fme_br = dfl_fme_create_bridge(pdata, i);
+ if (IS_ERR(fme_br)) {
+ ret = PTR_ERR(fme_br);
+ goto destroy_region;
+ }
+
+ list_add(&fme_br->node, &priv->bridge_list);
+
+ /* Create region for each port */
+ fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_br->br, i);
+		if (IS_ERR(fme_region)) {
+ ret = PTR_ERR(fme_region);
+ goto destroy_region;
+ }
+
+ list_add(&fme_region->node, &priv->region_list);
+ }
+ mutex_unlock(&pdata->lock);
+
+ return 0;
+
+destroy_region:
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+unlock:
+ mutex_unlock(&pdata->lock);
+ return ret;
+}
+
+static void pr_mgmt_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_fme *priv;
+
+ mutex_lock(&pdata->lock);
+ priv = dfl_fpga_pdata_get_private(pdata);
+
+ dfl_fme_destroy_regions(pdata);
+ dfl_fme_destroy_bridges(pdata);
+ dfl_fme_destroy_mgr(pdata);
+ mutex_unlock(&pdata->lock);
+}
+
+static long fme_pr_ioctl(struct platform_device *pdev,
+ struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case DFL_FPGA_FME_PORT_PR:
+ ret = fme_pr(pdev, arg);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+const struct dfl_feature_ops pr_mgmt_ops = {
+ .init = pr_mgmt_init,
+ .uinit = pr_mgmt_uinit,
+ .ioctl = fme_pr_ioctl,
+};
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
new file mode 100644
index 000000000000..096a699089d3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Partial Reconfiguration Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_PR_H
+#define __DFL_FME_PR_H
+
+#include <linux/platform_device.h>
+
+/**
+ * struct dfl_fme_region - FME fpga region data structure
+ *
+ * @region: platform device of the FPGA region.
+ * @node: used to link fme_region to a list.
+ * @port_id: indicate which port this region connected to.
+ */
+struct dfl_fme_region {
+ struct platform_device *region;
+ struct list_head node;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_region_pdata - platform data for FME region platform device.
+ *
+ * @mgr: platform device of the FPGA manager.
+ * @br: platform device of the FPGA bridge.
+ * @region_id: region id (same as port_id).
+ */
+struct dfl_fme_region_pdata {
+ struct platform_device *mgr;
+ struct platform_device *br;
+ int region_id;
+};
+
+/**
+ * struct dfl_fme_bridge - FME fpga bridge data structure
+ *
+ * @br: platform device of the FPGA bridge.
+ * @node: used to link fme_bridge to a list.
+ */
+struct dfl_fme_bridge {
+ struct platform_device *br;
+ struct list_head node;
+};
+
+/**
+ * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
+ *
+ * @cdev: container device.
+ * @port_id: port id.
+ */
+struct dfl_fme_br_pdata {
+ struct dfl_fpga_cdev *cdev;
+ int port_id;
+};
+
+/**
+ * struct dfl_fme_mgr_pdata - platform data for FME manager platform device.
+ *
+ * @ioaddr: mapped io address for FME manager platform device.
+ */
+struct dfl_fme_mgr_pdata {
+ void __iomem *ioaddr;
+};
+
+#define DFL_FPGA_FME_MGR "dfl-fme-mgr"
+#define DFL_FPGA_FME_BRIDGE "dfl-fme-bridge"
+#define DFL_FPGA_FME_REGION "dfl-fme-region"
+
+#endif /* __DFL_FME_PR_H */
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
new file mode 100644
index 000000000000..0b7e19c27c6d
--- /dev/null
+++ b/drivers/fpga/dfl-fme-region.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FPGA Region Driver for FPGA Management Engine (FME)
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fpga/fpga-region.h>
+
+#include "dfl-fme-pr.h"
+
+static int fme_region_get_bridges(struct fpga_region *region)
+{
+ struct dfl_fme_region_pdata *pdata = region->priv;
+ struct device *dev = &pdata->br->dev;
+
+ return fpga_bridge_get_to_list(dev, region->info, &region->bridge_list);
+}
+
+static int fme_region_probe(struct platform_device *pdev)
+{
+ struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct fpga_region *region;
+ struct fpga_manager *mgr;
+ int ret;
+
+ mgr = fpga_mgr_get(&pdata->mgr->dev);
+ if (IS_ERR(mgr))
+ return -EPROBE_DEFER;
+
+ region = fpga_region_create(dev, mgr, fme_region_get_bridges);
+ if (!region) {
+ ret = -ENOMEM;
+ goto eprobe_mgr_put;
+ }
+
+ region->priv = pdata;
+ region->compat_id = mgr->compat_id;
+ platform_set_drvdata(pdev, region);
+
+ ret = fpga_region_register(region);
+ if (ret)
+ goto region_free;
+
+ dev_dbg(dev, "DFL FME FPGA Region probed\n");
+
+ return 0;
+
+region_free:
+ fpga_region_free(region);
+eprobe_mgr_put:
+ fpga_mgr_put(mgr);
+ return ret;
+}
+
+static int fme_region_remove(struct platform_device *pdev)
+{
+ struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+
+ fpga_region_unregister(region);
+ fpga_mgr_put(region->mgr);
+
+ return 0;
+}
+
+static struct platform_driver fme_region_driver = {
+ .driver = {
+ .name = DFL_FPGA_FME_REGION,
+ },
+ .probe = fme_region_probe,
+ .remove = fme_region_remove,
+};
+
+module_platform_driver(fme_region_driver);
+
+MODULE_DESCRIPTION("FPGA Region for DFL FPGA Management Engine");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dfl-fme-region");
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
new file mode 100644
index 000000000000..5394a216c5c0
--- /dev/null
+++ b/drivers/fpga/dfl-fme.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for FPGA Management Engine (FME) Driver
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#ifndef __DFL_FME_H
+#define __DFL_FME_H
+
+/**
+ * struct dfl_fme - dfl fme private data
+ *
+ * @mgr: FME's FPGA manager platform device.
+ * @region_list: linked list of FME's FPGA regions.
+ * @bridge_list: linked list of FME's FPGA bridges.
+ * @pdata: fme platform device's pdata.
+ */
+struct dfl_fme {
+ struct platform_device *mgr;
+ struct list_head region_list;
+ struct list_head bridge_list;
+ struct dfl_feature_platform_data *pdata;
+};
+
+extern const struct dfl_feature_ops pr_mgmt_ops;
+
+#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
new file mode 100644
index 000000000000..66b5720582bb
--- /dev/null
+++ b/drivers/fpga/dfl-pci.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) PCIe device
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Zhang Yi <Yi.Z.Zhang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Henry Mitchel <henry.mitchel@intel.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/aer.h>
+
+#include "dfl.h"
+
+#define DRV_VERSION "0.8"
+#define DRV_NAME "dfl-pci"
+
+struct cci_drvdata {
+ struct dfl_fpga_cdev *cdev; /* container device */
+};
+
+static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
+{
+ if (pcim_iomap_regions(pcidev, BIT(bar), DRV_NAME))
+ return NULL;
+
+ return pcim_iomap_table(pcidev)[bar];
+}
+
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+
+static struct pci_device_id cci_pcie_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
+
+static int cci_init_drvdata(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ pci_set_drvdata(pcidev, drvdata);
+
+ return 0;
+}
+
+static void cci_remove_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+
+ /* remove all children feature devices */
+ dfl_fpga_feature_devs_remove(drvdata->cdev);
+}
+
+/* enumerate feature devices under pci device */
+static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
+{
+ struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
+ struct dfl_fpga_enum_info *info;
+ struct dfl_fpga_cdev *cdev;
+ resource_size_t start, len;
+ int port_num, bar, i, ret = 0;
+ void __iomem *base;
+ u32 offset;
+ u64 v;
+
+ /* allocate enumeration info via pci_dev */
+ info = dfl_fpga_enum_info_alloc(&pcidev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ /* start to find Device Feature List from Bar 0 */
+ base = cci_pci_ioremap_bar(pcidev, 0);
+ if (!base) {
+ ret = -ENOMEM;
+ goto enum_info_free_exit;
+ }
+
+	/*
+	 * A PF device has the FME and Ports/AFUs, while a VF device only has
+	 * one Port/AFU. Check them and add the related "Device Feature List"
+	 * information for the next enumeration step.
+	 */
+ if (dfl_feature_is_fme(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len, base);
+
+		/*
+		 * find more Device Feature Lists (e.g. Ports) based on the
+		 * information indicated by the FME module.
+		 */
+ v = readq(base + FME_HDR_CAP);
+ port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);
+
+ WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);
+
+ for (i = 0; i < port_num; i++) {
+ v = readq(base + FME_HDR_PORT_OFST(i));
+
+ /* skip ports which are not implemented. */
+ if (!(v & FME_PORT_OFST_IMP))
+ continue;
+
+ /*
+ * add Port's Device Feature List information for next
+ * step enumeration.
+ */
+ bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
+ offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
+ base = cci_pci_ioremap_bar(pcidev, bar);
+ if (!base)
+ continue;
+
+ start = pci_resource_start(pcidev, bar) + offset;
+ len = pci_resource_len(pcidev, bar) - offset;
+
+ dfl_fpga_enum_info_add_dfl(info, start, len,
+ base + offset);
+ }
+ } else if (dfl_feature_is_port(base)) {
+ start = pci_resource_start(pcidev, 0);
+ len = pci_resource_len(pcidev, 0);
+
+ dfl_fpga_enum_info_add_dfl(info, start, len, base);
+ } else {
+ ret = -ENODEV;
+ goto enum_info_free_exit;
+ }
+
+ /* start enumeration with prepared enumeration information */
+ cdev = dfl_fpga_feature_devs_enumerate(info);
+ if (IS_ERR(cdev)) {
+ dev_err(&pcidev->dev, "Enumeration failure\n");
+ ret = PTR_ERR(cdev);
+ goto enum_info_free_exit;
+ }
+
+ drvdata->cdev = cdev;
+
+enum_info_free_exit:
+ dfl_fpga_enum_info_free(info);
+
+ return ret;
+}
+
+static
+int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
+{
+ int ret;
+
+ ret = pcim_enable_device(pcidev);
+ if (ret < 0) {
+ dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
+ return ret;
+ }
+
+ ret = pci_enable_pcie_error_reporting(pcidev);
+ if (ret && ret != -EINVAL)
+ dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
+
+ pci_set_master(pcidev);
+
+ if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_error_report_exit;
+ } else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (ret)
+ goto disable_error_report_exit;
+ } else {
+ ret = -EIO;
+ dev_err(&pcidev->dev, "No suitable DMA support available.\n");
+ goto disable_error_report_exit;
+ }
+
+ ret = cci_init_drvdata(pcidev);
+ if (ret) {
+		dev_err(&pcidev->dev, "Failed to init drvdata %d.\n", ret);
+ goto disable_error_report_exit;
+ }
+
+ ret = cci_enumerate_feature_devs(pcidev);
+ if (ret) {
+ dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
+ goto disable_error_report_exit;
+ }
+
+ return ret;
+
+disable_error_report_exit:
+ pci_disable_pcie_error_reporting(pcidev);
+ return ret;
+}
+
+static void cci_pci_remove(struct pci_dev *pcidev)
+{
+ cci_remove_feature_devs(pcidev);
+ pci_disable_pcie_error_reporting(pcidev);
+}
+
+static struct pci_driver cci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cci_pcie_id_tbl,
+ .probe = cci_pci_probe,
+ .remove = cci_pci_remove,
+};
+
+module_pci_driver(cci_pci_driver);
+
+MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
new file mode 100644
index 000000000000..a9b521bccb06
--- /dev/null
+++ b/drivers/fpga/dfl.c
@@ -0,0 +1,1044 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+#include <linux/module.h>
+
+#include "dfl.h"
+
+static DEFINE_MUTEX(dfl_id_mutex);
+
+/*
+ * When adding support for a new feature dev to the DFL framework, it is
+ * required to add a new item in enum dfl_id_type and provide related
+ * information in the dfl_devs table below, which is indexed by dfl_id_type,
+ * e.g. the name string used for platform device creation (define name
+ * strings in dfl.h, as they could be reused by platform device drivers).
+ *
+ * If the new feature dev needs chardev support, then it is required to add
+ * a new item in the dfl_chrdevs table and configure dfl_devs[i].devt_type
+ * as an index into the dfl_chrdevs table. If no chardev support is needed,
+ * just set devt_type to an invalid index (DFL_FPGA_DEVT_MAX).
+ */
+enum dfl_id_type {
+ FME_ID, /* fme id allocation and mapping */
+ PORT_ID, /* port id allocation and mapping */
+ DFL_ID_MAX,
+};
+
+enum dfl_fpga_devt_type {
+ DFL_FPGA_DEVT_FME,
+ DFL_FPGA_DEVT_PORT,
+ DFL_FPGA_DEVT_MAX,
+};
+
+/**
+ * dfl_dev_info - dfl feature device information.
+ * @name: name string of the feature platform device.
+ * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
+ * @id: idr id of the feature dev.
+ * @devt_type: index to dfl_chrdevs[].
+ */
+struct dfl_dev_info {
+ const char *name;
+ u32 dfh_id;
+ struct idr id;
+ enum dfl_fpga_devt_type devt_type;
+};
+
+/* it is indexed by dfl_id_type */
+static struct dfl_dev_info dfl_devs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
+ .devt_type = DFL_FPGA_DEVT_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
+ .devt_type = DFL_FPGA_DEVT_PORT},
+};
+
+/**
+ * dfl_chardev_info - chardev information of dfl feature device
+ * @name: name string of the char device.
+ * @devt: devt of the char device.
+ */
+struct dfl_chardev_info {
+ const char *name;
+ dev_t devt;
+};
+
+/* indexed by enum dfl_fpga_devt_type */
+static struct dfl_chardev_info dfl_chrdevs[] = {
+ {.name = DFL_FPGA_FEATURE_DEV_FME},
+ {.name = DFL_FPGA_FEATURE_DEV_PORT},
+};
+
+static void dfl_ids_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_init(&dfl_devs[i].id);
+}
+
+static void dfl_ids_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ idr_destroy(&dfl_devs[i].id);
+}
+
+static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
+{
+ int id;
+
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
+ mutex_unlock(&dfl_id_mutex);
+
+ return id;
+}
+
+static void dfl_id_free(enum dfl_id_type type, int id)
+{
+ WARN_ON(type >= DFL_ID_MAX);
+ mutex_lock(&dfl_id_mutex);
+ idr_remove(&dfl_devs[type].id, id);
+ mutex_unlock(&dfl_id_mutex);
+}
+
+static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (!strcmp(dfl_devs[i].name, pdev->name))
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+static enum dfl_id_type dfh_id_to_type(u32 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
+ if (dfl_devs[i].dfh_id == id)
+ return i;
+
+ return DFL_ID_MAX;
+}
+
+/*
+ * Introduce a global port_ops list. It allows port drivers to register their
+ * ops in this list, so that other feature devices (e.g. the FME) can use the
+ * port functions even when the related port platform device is hidden. One
+ * example: in the virtualization case of a PCIe-based FPGA DFL device, when
+ * SR-IOV is enabled, a port (and its AFU) is turned into a VF and its port
+ * platform device is hidden from the system, but the port still needs to be
+ * accessed to finish the FPGA reconfiguration function in the FME.
+ */
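As a usage sketch (not part of this patch; "sample_get_id" and "sample_port_ops" are hypothetical names), a port driver could hook into this list roughly as follows:

	static int sample_get_id(struct platform_device *pdev)
	{
		/* a real driver would read the port id from hardware here */
		return 0;
	}

	static struct dfl_fpga_port_ops sample_port_ops = {
		.name = DFL_FPGA_FEATURE_DEV_PORT,	/* matched against pdev->name */
		.owner = THIS_MODULE,
		.get_id = sample_get_id,
	};

	/* on module load/unload: */
	dfl_fpga_port_ops_add(&sample_port_ops);
	...
	dfl_fpga_port_ops_del(&sample_port_ops);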
+
+static DEFINE_MUTEX(dfl_port_ops_mutex);
+static LIST_HEAD(dfl_port_ops_list);
+
+/**
+ * dfl_fpga_port_ops_get - get matched port ops from the global list
+ * @pdev: platform device to match with associated port ops.
+ * Return: matched port ops on success, NULL otherwise.
+ *
+ * Please note that dfl_fpga_port_ops_put() must be called after using the port_ops.
+ */
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+{
+ struct dfl_fpga_port_ops *ops = NULL;
+
+ mutex_lock(&dfl_port_ops_mutex);
+ if (list_empty(&dfl_port_ops_list))
+ goto done;
+
+ list_for_each_entry(ops, &dfl_port_ops_list, node) {
+ /* match port_ops using the name of platform device */
+ if (!strcmp(pdev->name, ops->name)) {
+ if (!try_module_get(ops->owner))
+ ops = NULL;
+ goto done;
+ }
+ }
+
+ ops = NULL;
+done:
+ mutex_unlock(&dfl_port_ops_mutex);
+ return ops;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
+
+/**
+ * dfl_fpga_port_ops_put - put port ops
+ * @ops: port ops.
+ */
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
+{
+ if (ops && ops->owner)
+ module_put(ops->owner);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
+
+/**
+ * dfl_fpga_port_ops_add - add port_ops to global list
+ * @ops: port ops to add.
+ */
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_add_tail(&ops->node, &dfl_port_ops_list);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
+
+/**
+ * dfl_fpga_port_ops_del - remove port_ops from global list
+ * @ops: port ops to del.
+ */
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
+{
+ mutex_lock(&dfl_port_ops_mutex);
+ list_del(&ops->node);
+ mutex_unlock(&dfl_port_ops_mutex);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
+
+/**
+ * dfl_fpga_check_port_id - check the port id
+ * @pdev: port platform device.
+ * @pport_id: port id to compare.
+ *
+ * Return: 1 if port device matches with given port id, otherwise 0.
+ */
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+{
+ struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev);
+ int port_id;
+
+ if (!port_ops || !port_ops->get_id)
+ return 0;
+
+ port_id = port_ops->get_id(pdev);
+ dfl_fpga_port_ops_put(port_ops);
+
+ return port_id == *(int *)pport_id;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
+
+/**
+ * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
+ * @pdev: feature device.
+ */
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (feature->ops) {
+ feature->ops->uinit(pdev, feature);
+ feature->ops = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
+
+static int dfl_feature_instance_init(struct platform_device *pdev,
+ struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature,
+ struct dfl_feature_driver *drv)
+{
+ int ret;
+
+ ret = drv->ops->init(pdev, feature);
+ if (ret)
+ return ret;
+
+ feature->ops = drv->ops;
+
+ return ret;
+}
+
+/**
+ * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
+ * @pdev: feature device.
+ * @feature_drvs: drvs for sub features.
+ *
+ * This function matches sub features with the given feature drvs list and
+ * uses the matched drv to initialize the related sub feature.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_driver *drv = feature_drvs;
+ struct dfl_feature *feature;
+ int ret;
+
+ while (drv->ops) {
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ /* match feature and drv using id */
+ if (feature->id == drv->id) {
+ ret = dfl_feature_instance_init(pdev, pdata,
+ feature, drv);
+ if (ret)
+ goto exit;
+ }
+ }
+ drv++;
+ }
+
+ return 0;
+exit:
+ dfl_fpga_dev_feature_uinit(pdev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
+
+static void dfl_chardev_uinit(void)
+{
+ int i;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
+ if (MAJOR(dfl_chrdevs[i].devt)) {
+ unregister_chrdev_region(dfl_chrdevs[i].devt,
+ MINORMASK);
+ dfl_chrdevs[i].devt = MKDEV(0, 0);
+ }
+}
+
+static int dfl_chardev_init(void)
+{
+ int i, ret;
+
+ for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
+ ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
+ dfl_chrdevs[i].name);
+ if (ret)
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ dfl_chardev_uinit();
+ return ret;
+}
+
+static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
+{
+ if (type >= DFL_FPGA_DEVT_MAX)
+ return 0;
+
+ return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
+}
+
+/**
+ * dfl_fpga_dev_ops_register - register cdev ops for feature dev
+ *
+ * @pdev: feature dev.
+ * @fops: file operations for feature dev's cdev.
+ * @owner: owning module/driver.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_init(&pdata->cdev, fops);
+ pdata->cdev.owner = owner;
+
+	/*
+	 * set parent to the feature device so that its refcount is
+	 * decreased after the last refcount of cdev is gone; this
+	 * makes sure the feature device stays valid during the
+	 * device file's life-cycle.
+	 */
+ pdata->cdev.kobj.parent = &pdev->dev.kobj;
+
+ return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
+
+/**
+ * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
+ * @pdev: feature dev.
+ */
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+ cdev_del(&pdata->cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
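A feature dev driver typically pairs these with its file_operations; a minimal sketch, where "sample_fops" and "sample_open" are hypothetical:

	static const struct file_operations sample_fops = {
		.owner = THIS_MODULE,
		.open = sample_open,
	};

	/* in the feature platform driver's probe(): */
	ret = dfl_fpga_dev_ops_register(pdev, &sample_fops, THIS_MODULE);

	/* and in its remove(): */
	dfl_fpga_dev_ops_unregister(pdev);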
+
+/**
+ * struct build_feature_devs_info - info collected during feature dev build.
+ *
+ * @dev: device to enumerate.
+ * @cdev: the container device for all feature devices.
+ * @feature_dev: current feature device.
+ * @ioaddr: header register region address of feature device in enumeration.
+ * @sub_features: a linked list of sub features for the feature device in enumeration.
+ * @feature_num: number of sub features for feature device in enumeration.
+ */
+struct build_feature_devs_info {
+ struct device *dev;
+ struct dfl_fpga_cdev *cdev;
+ struct platform_device *feature_dev;
+ void __iomem *ioaddr;
+ struct list_head sub_features;
+ int feature_num;
+};
+
+/**
+ * struct dfl_feature_info - sub feature info collected during feature dev build
+ *
+ * @fid: id of this sub feature.
+ * @mmio_res: mmio resource of this sub feature.
+ * @ioaddr: mapped base address of mmio resource.
+ * @node: node in sub_features linked list.
+ */
+struct dfl_feature_info {
+ u64 fid;
+ struct resource mmio_res;
+ void __iomem *ioaddr;
+ struct list_head node;
+};
+
+static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
+ struct platform_device *port)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
+
+ mutex_lock(&cdev->lock);
+ list_add(&pdata->node, &cdev->port_dev_list);
+ get_device(&pdata->dev->dev);
+ mutex_unlock(&cdev->lock);
+}
+
+/*
+ * Register the current feature device. It is called when we need to switch
+ * to parsing another feature, or when we have parsed all features on the
+ * given device feature list.
+ */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct platform_device *fdev = binfo->feature_dev;
+ struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_info *finfo, *p;
+ int ret, index = 0;
+
+ if (!fdev)
+ return 0;
+
+	/*
+	 * we do not need to care about freeing the memory associated with
+	 * the platform device. After calling platform_device_unregister(),
+	 * it will be automatically freed by the device's release() callback,
+	 * platform_device_release().
+	 */
+ pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->dev = fdev;
+ pdata->num = binfo->feature_num;
+ pdata->dfl_cdev = binfo->cdev;
+ mutex_init(&pdata->lock);
+
+	/*
+	 * the count should be initialized to 0 to make sure
+	 * __fpga_port_enable() following __fpga_port_disable()
+	 * works properly for the port device, and it should
+	 * always be 0 for the fme device.
+	 */
+ WARN_ON(pdata->disable_count);
+
+ fdev->dev.platform_data = pdata;
+
+ /* each sub feature has one MMIO resource */
+ fdev->num_resources = binfo->feature_num;
+ fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
+ GFP_KERNEL);
+ if (!fdev->resource)
+ return -ENOMEM;
+
+ /* fill features and resource information for feature dev */
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ struct dfl_feature *feature = &pdata->features[index];
+
+ /* save resource information for each feature */
+ feature->id = finfo->fid;
+ feature->resource_index = index;
+ feature->ioaddr = finfo->ioaddr;
+ fdev->resource[index++] = finfo->mmio_res;
+
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+
+ ret = platform_device_add(binfo->feature_dev);
+ if (!ret) {
+ if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
+ dfl_fpga_cdev_add_port_dev(binfo->cdev,
+ binfo->feature_dev);
+ else
+ binfo->cdev->fme_dev =
+ get_device(&binfo->feature_dev->dev);
+ /*
+		 * reset it to avoid build_info_free() freeing its resources.
+ *
+ * The resource of successfully registered feature devices
+ * will be freed by platform_device_unregister(). See the
+ * comments in build_info_create_dev().
+ */
+ binfo->feature_dev = NULL;
+ }
+
+ return ret;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum dfl_id_type type, void __iomem *ioaddr)
+{
+ struct platform_device *fdev;
+ int ret;
+
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ /* we will create a new device, commit current device first */
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+	/*
+	 * we use -ENODEV as the initialization value; it indicates
+	 * whether the id needs to be reclaimed
+	 */
+ fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ if (!fdev)
+ return -ENOMEM;
+
+ binfo->feature_dev = fdev;
+ binfo->feature_num = 0;
+ binfo->ioaddr = ioaddr;
+ INIT_LIST_HEAD(&binfo->sub_features);
+
+ fdev->id = dfl_id_alloc(type, &fdev->dev);
+ if (fdev->id < 0)
+ return fdev->id;
+
+ fdev->dev.parent = &binfo->cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+
+ return 0;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_info *finfo, *p;
+
+ /*
+	 * if it is a valid id, free it. See comments in
+ * build_info_create_dev()
+ */
+ if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
+ dfl_id_free(feature_dev_id_type(binfo->feature_dev),
+ binfo->feature_dev->id);
+
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
+ }
+ }
+
+ platform_device_put(binfo->feature_dev);
+
+ devm_kfree(binfo->dev, binfo);
+}
+
+static inline u32 feature_size(void __iomem *start)
+{
+ u64 v = readq(start + DFH);
+ u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
+ /* workaround for private features with invalid size, use 4K instead */
+ return ofst ? ofst : 4096;
+}
+
+static u64 feature_id(void __iomem *start)
+{
+ u64 v = readq(start + DFH);
+ u16 id = FIELD_GET(DFH_ID, v);
+ u8 type = FIELD_GET(DFH_TYPE, v);
+
+ if (type == DFH_TYPE_FIU)
+ return FEATURE_ID_FIU_HEADER;
+ else if (type == DFH_TYPE_PRIVATE)
+ return id;
+ else if (type == DFH_TYPE_AFU)
+ return FEATURE_ID_AFU;
+
+ WARN_ON(1);
+ return 0;
+}
+
+/*
+ * When creating sub feature instances, for private features there is no need
+ * to provide the resource size and feature id, as they can be read from the
+ * DFH register. For the afu sub feature, its register region only contains
+ * user-defined registers, so never trust any information from it; just use
+ * the resource size information provided by its parent FIU.
+ */
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
+ resource_size_t size, u64 fid)
+{
+ struct dfl_feature_info *finfo;
+
+ /* read feature size and id if inputs are invalid */
+ size = size ? size : feature_size(dfl->ioaddr + ofst);
+ fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
+
+ if (dfl->len - ofst < size)
+ return -EINVAL;
+
+ finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ if (!finfo)
+ return -ENOMEM;
+
+ finfo->fid = fid;
+ finfo->mmio_res.start = dfl->start + ofst;
+ finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
+ finfo->mmio_res.flags = IORESOURCE_MEM;
+ finfo->ioaddr = dfl->ioaddr + ofst;
+
+ list_add_tail(&finfo->node, &binfo->sub_features);
+ binfo->feature_num++;
+
+ return 0;
+}
+
+static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
+ u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
+
+ WARN_ON(!size);
+
+ return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
+}
+
+static int parse_feature_afu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ if (!binfo->feature_dev) {
+ dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
+ return -EINVAL;
+ }
+
+ switch (feature_dev_id_type(binfo->feature_dev)) {
+ case PORT_ID:
+ return parse_feature_port_afu(binfo, dfl, ofst);
+ default:
+ dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
+ binfo->feature_dev->name);
+ }
+
+ return 0;
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ u32 id, offset;
+ u64 v;
+ int ret = 0;
+
+ v = readq(dfl->ioaddr + ofst + DFH);
+ id = FIELD_GET(DFH_ID, v);
+
+ /* create platform device for dfl feature dev */
+ ret = build_info_create_dev(binfo, dfh_id_to_type(id),
+ dfl->ioaddr + ofst);
+ if (ret)
+ return ret;
+
+ ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
+ if (ret)
+ return ret;
+ /*
+ * find and parse FIU's child AFU via its NEXT_AFU register.
+ * please note that only Port has valid NEXT_AFU pointer per spec.
+ */
+ v = readq(dfl->ioaddr + ofst + NEXT_AFU);
+
+ offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
+ if (offset)
+ return parse_feature_afu(binfo, dfl, ofst + offset);
+
+ dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
+
+ return ret;
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl,
+ resource_size_t ofst)
+{
+ if (!binfo->feature_dev) {
+ dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
+ (unsigned long long)feature_id(dfl->ioaddr + ofst));
+ return -EINVAL;
+ }
+
+ return create_feature_instance(binfo, dfl, ofst, 0, 0);
+}
+
+/**
+ * parse_feature - parse a feature on given device feature list
+ *
+ * @binfo: build feature devices information.
+ * @dfl: device feature list to parse
+ * @ofst: offset to feature header on this device feature list
+ */
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
+{
+ u64 v;
+ u32 type;
+
+ v = readq(dfl->ioaddr + ofst + DFH);
+ type = FIELD_GET(DFH_TYPE, v);
+
+ switch (type) {
+ case DFH_TYPE_AFU:
+ return parse_feature_afu(binfo, dfl, ofst);
+ case DFH_TYPE_PRIVATE:
+ return parse_feature_private(binfo, dfl, ofst);
+ case DFH_TYPE_FIU:
+ return parse_feature_fiu(binfo, dfl, ofst);
+ default:
+ dev_info(binfo->dev,
+ "Feature Type %x is not supported.\n", type);
+ }
+
+ return 0;
+}
+
+static int parse_feature_list(struct build_feature_devs_info *binfo,
+ struct dfl_fpga_enum_dfl *dfl)
+{
+ void __iomem *start = dfl->ioaddr;
+ void __iomem *end = dfl->ioaddr + dfl->len;
+ int ret = 0;
+ u32 ofst = 0;
+ u64 v;
+
+ /* walk through the device feature list via DFH's next DFH pointer. */
+ for (; start < end; start += ofst) {
+ if (end - start < DFH_SIZE) {
+ dev_err(binfo->dev, "The region is too small to contain a feature.\n");
+ return -EINVAL;
+ }
+
+ ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
+ if (ret)
+ return ret;
+
+ v = readq(start + DFH);
+ ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
+
+		/* stop parsing if EOL (End of List) is set or offset is 0 */
+ if ((v & DFH_EOL) || !ofst)
+ break;
+ }
+
+ /* commit current feature device when reach the end of list */
+ return build_info_commit_dev(binfo);
+}
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
+{
+ struct dfl_fpga_enum_info *info;
+
+ get_device(dev);
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ put_device(dev);
+ return NULL;
+ }
+
+ info->dev = dev;
+ INIT_LIST_HEAD(&info->dfls);
+
+ return info;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
+
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
+{
+ struct dfl_fpga_enum_dfl *tmp, *dfl;
+ struct device *dev;
+
+ if (!info)
+ return;
+
+ dev = info->dev;
+
+ /* remove all device feature lists in the list. */
+ list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
+ list_del(&dfl->node);
+ devm_kfree(dev, dfl);
+ }
+
+ devm_kfree(dev, info);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
+
+/**
+ * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
+ *
+ * @info: ptr to dfl_fpga_enum_info
+ * @start: mmio resource address of the device feature list.
+ * @len: mmio resource length of the device feature list.
+ * @ioaddr: mapped mmio resource address of the device feature list.
+ *
+ * One FPGA device may have one or more Device Feature Lists (DFLs). Use this
+ * function to add the information of each DFL to the common data structure
+ * for the next enumeration step.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len,
+ void __iomem *ioaddr)
+{
+ struct dfl_fpga_enum_dfl *dfl;
+
+ dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
+ if (!dfl)
+ return -ENOMEM;
+
+ dfl->start = start;
+ dfl->len = len;
+ dfl->ioaddr = ioaddr;
+
+ list_add_tail(&dfl->node, &info->dfls);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
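Together with the alloc/free helpers above, this gives low-level bus drivers the same three-step pattern used by dfl-pci.c earlier in this patch; a condensed sketch:

	info = dfl_fpga_enum_info_alloc(dev);
	if (!info)
		return -ENOMEM;

	/* one call per Device Feature List found on the device */
	dfl_fpga_enum_info_add_dfl(info, start, len, ioaddr);

	cdev = dfl_fpga_feature_devs_enumerate(info);
	dfl_fpga_enum_info_free(info);
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);

	/* ... and on teardown: */
	dfl_fpga_feature_devs_remove(cdev);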
+
+static int remove_feature_dev(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ enum dfl_id_type type = feature_dev_id_type(pdev);
+ int id = pdev->id;
+
+ platform_device_unregister(pdev);
+
+ dfl_id_free(type, id);
+
+ return 0;
+}
+
+static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
+{
+ device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
+}
+
+/**
+ * dfl_fpga_feature_devs_enumerate - enumerate feature devices
+ * @info: information for enumeration.
+ *
+ * This function creates a container device (base FPGA region), enumerates
+ * feature devices based on the enumeration info and creates platform devices
+ * under the container device.
+ *
+ * Return: dfl_fpga_cdev struct on success, -errno on failure
+ */
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
+{
+ struct build_feature_devs_info *binfo;
+ struct dfl_fpga_enum_dfl *dfl;
+ struct dfl_fpga_cdev *cdev;
+ int ret = 0;
+
+ if (!info->dev)
+ return ERR_PTR(-ENODEV);
+
+ cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return ERR_PTR(-ENOMEM);
+
+ cdev->region = fpga_region_create(info->dev, NULL, NULL);
+ if (!cdev->region) {
+ ret = -ENOMEM;
+ goto free_cdev_exit;
+ }
+
+ cdev->parent = info->dev;
+ mutex_init(&cdev->lock);
+ INIT_LIST_HEAD(&cdev->port_dev_list);
+
+ ret = fpga_region_register(cdev->region);
+ if (ret)
+ goto free_region_exit;
+
+ /* create and init build info for enumeration */
+ binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
+ if (!binfo) {
+ ret = -ENOMEM;
+ goto unregister_region_exit;
+ }
+
+ binfo->dev = info->dev;
+ binfo->cdev = cdev;
+
+ /*
+ * start enumeration for all feature devices based on Device Feature
+ * Lists.
+ */
+ list_for_each_entry(dfl, &info->dfls, node) {
+ ret = parse_feature_list(binfo, dfl);
+ if (ret) {
+ remove_feature_devs(cdev);
+ build_info_free(binfo);
+ goto unregister_region_exit;
+ }
+ }
+
+ build_info_free(binfo);
+
+ return cdev;
+
+unregister_region_exit:
+ fpga_region_unregister(cdev->region);
+free_region_exit:
+ fpga_region_free(cdev->region);
+free_cdev_exit:
+ devm_kfree(info->dev, cdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
+
+/**
+ * dfl_fpga_feature_devs_remove - remove all feature devices
+ * @cdev: fpga container device.
+ *
+ * Remove the container device and all feature devices under given container
+ * devices.
+ */
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
+{
+ struct dfl_feature_platform_data *pdata, *ptmp;
+
+ remove_feature_devs(cdev);
+
+ mutex_lock(&cdev->lock);
+ if (cdev->fme_dev) {
+ /* the fme should be unregistered. */
+ WARN_ON(device_is_registered(cdev->fme_dev));
+ put_device(cdev->fme_dev);
+ }
+
+ list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
+ struct platform_device *port_dev = pdata->dev;
+
+ /* the port should be unregistered. */
+ WARN_ON(device_is_registered(&port_dev->dev));
+ list_del(&pdata->node);
+ put_device(&port_dev->dev);
+ }
+ mutex_unlock(&cdev->lock);
+
+ fpga_region_unregister(cdev->region);
+ devm_kfree(cdev->parent, cdev);
+}
+EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
+
+/**
+ * __dfl_fpga_cdev_find_port - find a port under given container device
+ *
+ * @cdev: container device
+ * @data: data passed to match function
+ * @match: match function used to find specific port from the port device list
+ *
+ * Find a port device under container device. This function needs to be
+ * invoked with lock held.
+ *
+ * Return: pointer to port's platform device if successful, NULL otherwise.
+ *
+ * NOTE: you will need to drop the device reference with put_device() after use.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct dfl_feature_platform_data *pdata;
+ struct platform_device *port_dev;
+
+ list_for_each_entry(pdata, &cdev->port_dev_list, node) {
+ port_dev = pdata->dev;
+
+ if (match(port_dev, data) && get_device(&port_dev->dev))
+ return port_dev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+
+static int __init dfl_fpga_init(void)
+{
+ int ret;
+
+ dfl_ids_init();
+
+ ret = dfl_chardev_init();
+ if (ret)
+ dfl_ids_destroy();
+
+ return ret;
+}
+
+static void __exit dfl_fpga_exit(void)
+{
+ dfl_chardev_uinit();
+ dfl_ids_destroy();
+}
+
+module_init(dfl_fpga_init);
+module_exit(dfl_fpga_exit);
+
+MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
new file mode 100644
index 000000000000..a8b869e9e5b7
--- /dev/null
+++ b/drivers/fpga/dfl.h
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver Header File for FPGA Device Feature List (DFL) Support
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#ifndef __FPGA_DFL_H
+#define __FPGA_DFL_H
+
+#include <linux/bitfield.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/iopoll.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/fpga/fpga-region.h>
+
+/* maximum supported number of ports */
+#define MAX_DFL_FPGA_PORT_NUM 4
+/* plus one for fme device */
+#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
+
+/* Reserved 0x0 for Header Group Register and 0xff for AFU */
+#define FEATURE_ID_FIU_HEADER 0x0
+#define FEATURE_ID_AFU 0xff
+
+#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define FME_FEATURE_ID_POWER_MGMT 0x2
+#define FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define FME_FEATURE_ID_PR_MGMT 0x5
+#define FME_FEATURE_ID_HSSI 0x6
+#define FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define PORT_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
+#define PORT_FEATURE_ID_AFU FEATURE_ID_AFU
+#define PORT_FEATURE_ID_ERROR 0x10
+#define PORT_FEATURE_ID_UMSG 0x11
+#define PORT_FEATURE_ID_UINT 0x12
+#define PORT_FEATURE_ID_STP 0x13
+
+/*
+ * Device Feature Header Register Set
+ *
+ * For FIUs, they all have DFH + GUID + NEXT_AFU as common header registers.
+ * For AFUs, they have DFH + GUID as common header registers.
+ * For private features, they only have DFH register as common header.
+ */
+#define DFH 0x0
+#define GUID_L 0x8
+#define GUID_H 0x10
+#define NEXT_AFU 0x18
+
+#define DFH_SIZE 0x8
+
+/* Device Feature Header Register Bitfield */
+#define DFH_ID GENMASK_ULL(11, 0) /* Feature ID */
+#define DFH_ID_FIU_FME 0
+#define DFH_ID_FIU_PORT 1
+#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
+#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
+#define DFH_EOL BIT_ULL(40) /* End of list */
+#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
+#define DFH_TYPE_AFU 1
+#define DFH_TYPE_PRIVATE 3
+#define DFH_TYPE_FIU 4
+
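These bitfields are extracted with FIELD_GET() from <linux/bitfield.h> (included above); for instance, decoding one header word read from a feature's DFH register:

	u64 v = readq(base + DFH);
	u16 id = FIELD_GET(DFH_ID, v);
	u8 type = FIELD_GET(DFH_TYPE, v);
	u32 next_ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
	bool eol = v & DFH_EOL;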
+/* Next AFU Register Bitfield */
+#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
+
+/* FME Header Register Set */
+#define FME_HDR_DFH DFH
+#define FME_HDR_GUID_L GUID_L
+#define FME_HDR_GUID_H GUID_H
+#define FME_HDR_NEXT_AFU NEXT_AFU
+#define FME_HDR_CAP 0x30
+#define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8))
+#define FME_HDR_BITSTREAM_ID 0x60
+#define FME_HDR_BITSTREAM_MD 0x68
+
+/* FME Fab Capability Register Bitfield */
+#define FME_CAP_FABRIC_VERID GENMASK_ULL(7, 0) /* Fabric version ID */
+#define FME_CAP_SOCKET_ID BIT_ULL(8) /* Socket ID */
+#define FME_CAP_PCIE0_LINK_AVL BIT_ULL(12) /* PCIE0 Link */
+#define FME_CAP_PCIE1_LINK_AVL BIT_ULL(13) /* PCIE1 Link */
+#define FME_CAP_COHR_LINK_AVL BIT_ULL(14) /* Coherent Link */
+#define FME_CAP_IOMMU_AVL BIT_ULL(16) /* IOMMU available */
+#define FME_CAP_NUM_PORTS GENMASK_ULL(19, 17) /* Number of ports */
+#define FME_CAP_ADDR_WIDTH GENMASK_ULL(29, 24) /* Address bus width */
+#define FME_CAP_CACHE_SIZE GENMASK_ULL(43, 32) /* cache size in KB */
+#define FME_CAP_CACHE_ASSOC GENMASK_ULL(47, 44) /* Associativity */
+
+/* FME Port Offset Register Bitfield */
+/* Offset to port device feature header */
+#define FME_PORT_OFST_DFH_OFST GENMASK_ULL(23, 0)
+/* PCI Bar ID for this port */
+#define FME_PORT_OFST_BAR_ID GENMASK_ULL(34, 32)
+/* AFU MMIO access permission. 1 - VF, 0 - PF. */
+#define FME_PORT_OFST_ACC_CTRL BIT_ULL(55)
+#define FME_PORT_OFST_ACC_PF 0
+#define FME_PORT_OFST_ACC_VF 1
+#define FME_PORT_OFST_IMP BIT_ULL(60)
+
+/* PORT Header Register Set */
+#define PORT_HDR_DFH DFH
+#define PORT_HDR_GUID_L GUID_L
+#define PORT_HDR_GUID_H GUID_H
+#define PORT_HDR_NEXT_AFU NEXT_AFU
+#define PORT_HDR_CAP 0x30
+#define PORT_HDR_CTRL 0x38
+
+/* Port Capability Register Bitfield */
+#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
+#define PORT_CAP_MMIO_SIZE GENMASK_ULL(23, 8) /* MMIO size in KB */
+#define PORT_CAP_SUPP_INT_NUM GENMASK_ULL(35, 32) /* Interrupts num */
+
+/* Port Control Register Bitfield */
+#define PORT_CTRL_SFTRST BIT_ULL(0) /* Port soft reset */
+/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/
+#define PORT_CTRL_LATENCY BIT_ULL(2)
+#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
+/**
+ * struct dfl_fpga_port_ops - port ops
+ *
+ * @name: name of this port ops, to match with port platform device.
+ * @owner: pointer to the module which owns this port ops.
+ * @node: node to link port ops to global list.
+ * @get_id: get port id from hardware.
+ * @enable_set: enable/disable the port.
+ */
+struct dfl_fpga_port_ops {
+ const char *name;
+ struct module *owner;
+ struct list_head node;
+ int (*get_id)(struct platform_device *pdev);
+ int (*enable_set)(struct platform_device *pdev, bool enable);
+};
+
+void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
+void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
+int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+
+/**
+ * struct dfl_feature_driver - sub feature's driver
+ *
+ * @id: sub feature id.
+ * @ops: ops of this sub feature.
+ */
+struct dfl_feature_driver {
+ u64 id;
+ const struct dfl_feature_ops *ops;
+};
+
+/**
+ * struct dfl_feature - sub feature of the feature devices
+ *
+ * @id: sub feature id.
+ * @resource_index: each sub feature has one mmio resource for its registers.
+ *		     this index is used to find its mmio resource from the
+ *		     feature dev (platform device)'s resources.
+ * @ioaddr: mapped mmio resource address.
+ * @ops: ops of this sub feature.
+ */
+struct dfl_feature {
+ u64 id;
+ int resource_index;
+ void __iomem *ioaddr;
+ const struct dfl_feature_ops *ops;
+};
+
+#define DEV_STATUS_IN_USE 0
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @node: node to link feature devs to container device's port_dev_list.
+ * @lock: mutex to protect platform data.
+ * @cdev: cdev of feature dev.
+ * @dev: ptr to platform device linked with this platform data.
+ * @dfl_cdev: ptr to container device.
+ * @disable_count: count for port disable.
+ * @num: number for sub features.
+ * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
+ * @private: ptr to feature dev private data.
+ * @features: sub features of this feature dev.
+ */
+struct dfl_feature_platform_data {
+ struct list_head node;
+ struct mutex lock;
+ struct cdev cdev;
+ struct platform_device *dev;
+ struct dfl_fpga_cdev *dfl_cdev;
+ unsigned int disable_count;
+ unsigned long dev_status;
+ void *private;
+ int num;
+ struct dfl_feature features[0];
+};
+
+static inline
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
+{
+	/* Test and set the IN_USE flag to ensure the file is exclusively used */
+ if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
+ return -EBUSY;
+
+ return 0;
+}
+
+static inline
+void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+{
+ clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
+}
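These helpers bracket a device file's lifetime; a sketch of an open() handler ("sample_open" is hypothetical; dfl_fpga_inode_to_feature_dev() is defined further below in this header):

	static int sample_open(struct inode *inode, struct file *filp)
	{
		struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
		struct dfl_feature_platform_data *pdata =
			dev_get_platdata(&fdev->dev);
		int ret;

		ret = dfl_feature_dev_use_begin(pdata); /* -EBUSY if already open */
		if (ret)
			return ret;

		filp->private_data = fdev;
		return 0;
	}

	/* the matching release() handler calls dfl_feature_dev_use_end(pdata) */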
+
+static inline
+void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+ void *private)
+{
+ pdata->private = private;
+}
+
+static inline
+void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->private;
+}
+
+struct dfl_feature_ops {
+ int (*init)(struct platform_device *pdev, struct dfl_feature *feature);
+ void (*uinit)(struct platform_device *pdev,
+ struct dfl_feature *feature);
+ long (*ioctl)(struct platform_device *pdev, struct dfl_feature *feature,
+ unsigned int cmd, unsigned long arg);
+};
+
+#define DFL_FPGA_FEATURE_DEV_FME "dfl-fme"
+#define DFL_FPGA_FEATURE_DEV_PORT "dfl-port"
+
+static inline int dfl_feature_platform_data_size(const int num)
+{
+ return sizeof(struct dfl_feature_platform_data) +
+ num * sizeof(struct dfl_feature);
+}
+
+void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
+int dfl_fpga_dev_feature_init(struct platform_device *pdev,
+ struct dfl_feature_driver *feature_drvs);
+
+int dfl_fpga_dev_ops_register(struct platform_device *pdev,
+ const struct file_operations *fops,
+ struct module *owner);
+void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
+
+static inline
+struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+{
+ struct dfl_feature_platform_data *pdata;
+
+ pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
+ cdev);
+ return pdata->dev;
+}
+
+#define dfl_fpga_dev_for_each_feature(pdata, feature) \
+ for ((feature) = (pdata)->features; \
+ (feature) < (pdata)->features + (pdata)->num; (feature)++)
+
+static inline
+struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u64 id)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (feature->id == id)
+ return feature;
+
+ return NULL;
+}
+
+static inline
+void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u64 id)
+{
+ struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+
+ if (feature && feature->ioaddr)
+ return feature->ioaddr;
+
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline bool is_dfl_feature_present(struct device *dev, u64 id)
+{
+ return !!dfl_get_feature_ioaddr_by_id(dev, id);
+}
+
+static inline
+struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->dev->dev.parent->parent;
+}
+
+static inline bool dfl_feature_is_fme(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_FME);
+}
+
+static inline bool dfl_feature_is_port(void __iomem *base)
+{
+ u64 v = readq(base + DFH);
+
+ return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
+ (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
+}
+
+/**
+ * struct dfl_fpga_enum_info - DFL FPGA enumeration information
+ *
+ * @dev: parent device.
+ * @dfls: list of device feature lists.
+ */
+struct dfl_fpga_enum_info {
+ struct device *dev;
+ struct list_head dfls;
+};
+
+/**
+ * struct dfl_fpga_enum_dfl - DFL FPGA enumeration device feature list info
+ *
+ * @start: base address of this device feature list.
+ * @len: size of this device feature list.
+ * @ioaddr: mapped base address of this device feature list.
+ * @node: node in list of device feature lists.
+ */
+struct dfl_fpga_enum_dfl {
+ resource_size_t start;
+ resource_size_t len;
+
+ void __iomem *ioaddr;
+
+ struct list_head node;
+};
+
+struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
+int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
+ resource_size_t start, resource_size_t len,
+ void __iomem *ioaddr);
+void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
+
+/**
+ * struct dfl_fpga_cdev - container device of DFL based FPGA
+ *
+ * @parent: parent device of this container device.
+ * @region: base fpga region.
+ * @fme_dev: FME feature device under this container device.
+ * @lock: mutex lock to protect the port device list.
+ * @port_dev_list: list of all port feature devices under this container device.
+ */
+struct dfl_fpga_cdev {
+ struct device *parent;
+ struct fpga_region *region;
+ struct device *fme_dev;
+ struct mutex lock;
+ struct list_head port_dev_list;
+};
+
+struct dfl_fpga_cdev *
+dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
+void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
+
+/*
+ * The device reference must be dropped with put_device() after using the
+ * port platform device returned by the __dfl_fpga_cdev_find_port() and
+ * dfl_fpga_cdev_find_port() functions.
+ */
+struct platform_device *
+__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *));
+
+static inline struct platform_device *
+dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct platform_device *, void *))
+{
+ struct platform_device *pdev;
+
+ mutex_lock(&cdev->lock);
+ pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ mutex_unlock(&cdev->lock);
+
+ return pdev;
+}
+#endif /* __FPGA_DFL_H */
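For example, dfl_fpga_check_port_id() declared above has the right match signature, so a caller can look up a port by id (and must drop the reference afterwards):

	int port_id = 0;
	struct platform_device *port;

	port = dfl_fpga_cdev_find_port(cdev, &port_id, dfl_fpga_check_port_id);
	if (port) {
		/* ... use the port device ... */
		put_device(&port->dev);
	}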
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index c1564cf827fe..a41b07e37884 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -406,12 +406,40 @@ static ssize_t state_show(struct device *dev,
return sprintf(buf, "%s\n", state_str[mgr->state]);
}
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ u64 status;
+ int len = 0;
+
+ if (!mgr->mops->status)
+ return -ENOENT;
+
+ status = mgr->mops->status(mgr);
+
+ if (status & FPGA_MGR_STATUS_OPERATION_ERR)
+ len += sprintf(buf + len, "reconfig operation error\n");
+ if (status & FPGA_MGR_STATUS_CRC_ERR)
+ len += sprintf(buf + len, "reconfig CRC error\n");
+ if (status & FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR)
+ len += sprintf(buf + len, "reconfig incompatible image\n");
+ if (status & FPGA_MGR_STATUS_IP_PROTOCOL_ERR)
+ len += sprintf(buf + len, "reconfig IP protocol error\n");
+ if (status & FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR)
+ len += sprintf(buf + len, "reconfig fifo overflow error\n");
+
+ return len;
+}
+
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_RO(status);
static struct attribute *fpga_mgr_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+ &dev_attr_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(fpga_mgr);
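With this attribute in place, the manager status becomes readable from userspace, e.g. via /sys/class/fpga_manager/fpga0/status (assuming the first manager is fpga0); each set error bit is printed on its own line, and an empty read means no error is flagged.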
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 6d214d75c7be..0d65220d5ec5 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -158,6 +158,27 @@ err_put_region:
}
EXPORT_SYMBOL_GPL(fpga_region_program_fpga);
+static ssize_t compat_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ if (!region->compat_id)
+ return -ENOENT;
+
+ return sprintf(buf, "%016llx%016llx\n",
+ (unsigned long long)region->compat_id->id_h,
+ (unsigned long long)region->compat_id->id_l);
+}
+
+static DEVICE_ATTR_RO(compat_id);
+
+static struct attribute *fpga_region_attrs[] = {
+ &dev_attr_compat_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(fpga_region);
+
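This exposes the compatibility id, when a low-level driver provides one, as a read-only sysfs attribute, e.g. /sys/class/fpga_region/region0/compat_id (assuming the first region is region0), printed as the 128-bit concatenation of id_h and id_l.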
/**
* fpga_region_create - alloc and init a struct fpga_region
* @dev: device parent
@@ -258,6 +279,7 @@ static int __init fpga_region_init(void)
if (IS_ERR(fpga_region_class))
return PTR_ERR(fpga_region_class);
+ fpga_region_class->dev_groups = fpga_region_groups;
fpga_region_class->dev_release = fpga_region_dev_release;
return 0;
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index a326ed663d3c..af3a20dd5aa4 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -12,6 +12,21 @@ menuconfig FSI
if FSI
+config FSI_NEW_DEV_NODE
+ bool "Create '/dev/fsi' directory for char devices"
+ default n
+ ---help---
+ This option causes char devices created for FSI devices to be
+ located under a common /dev/fsi/ directory. Set to N unless your
+ userspace has been updated to handle the new location.
+
+	  It also causes the char device names to be offset by one so
+	  that chip 0 will have /dev/scom1 and chip 1 /dev/scom2, to
+	  match old userspace expectations.
+
+ New userspace will use udev rules to generate predictable access
+ symlinks in /dev/fsi/by-path when this option is enabled.
+
config FSI_MASTER_GPIO
tristate "GPIO-based FSI master"
depends on GPIOLIB
@@ -27,9 +42,26 @@ config FSI_MASTER_HUB
allow chaining of FSI links to an arbitrary depth. This allows for
a high target device fanout.
+config FSI_MASTER_AST_CF
+ tristate "FSI master based on Aspeed ColdFire coprocessor"
+ depends on GPIOLIB
+ depends on GPIO_ASPEED
+ ---help---
+	  This option enables an FSI master using the AST2400 and AST2500 GPIO
+ lines driven by the internal ColdFire coprocessor. This requires
+ the corresponding machine specific ColdFire firmware to be available.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
---help---
This option enables an FSI based SCOM device driver.
+config FSI_SBEFIFO
+ tristate "SBEFIFO FSI client device driver"
+ depends on OF_ADDRESS
+ ---help---
+ This option enables an FSI based SBEFIFO device driver. The SBEFIFO is
+ a pipe-like FSI device for communicating with the self boot engine
+ (SBE) on POWER processors.
+
endif
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index 65eb99dfafdb..a50d6ce22fb3 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -2,4 +2,6 @@
obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
+obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
+obj-$(CONFIG_FSI_SBEFIFO) += fsi-sbefifo.o
diff --git a/drivers/fsi/cf-fsi-fw.h b/drivers/fsi/cf-fsi-fw.h
new file mode 100644
index 000000000000..712df0461911
--- /dev/null
+++ b/drivers/fsi/cf-fsi-fw.h
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+#ifndef __CF_FSI_FW_H
+#define __CF_FSI_FW_H
+
+/*
+ * uCode file layout
+ *
+ * 0000...03ff : m68k exception vectors
+ * 0400...04ff : Header info & boot config block
+ * 0500....... : Code & stack
+ */
+
+/*
+ * Header info & boot config area
+ *
+ * The Header info is built into the ucode and provides version and
+ * platform information.
+ *
+ * The Boot config needs to be adjusted by the ARM prior to starting
+ * the ucode if the Command/Status area isn't at 0x320000 in CF space
+ * (i.e. the beginning of SRAM).
+ */
+
+#define HDR_OFFSET 0x400
+
+/* Info: Signature & version */
+#define HDR_SYS_SIG 0x00 /* 2 bytes system signature */
+#define SYS_SIG_SHARED 0x5348
+#define SYS_SIG_SPLIT 0x5350
+#define HDR_FW_VERS 0x02 /* 2 bytes Major.Minor */
+#define HDR_API_VERS 0x04 /* 2 bytes Major.Minor */
+#define API_VERSION_MAJ 2 /* Current version */
+#define API_VERSION_MIN 1
+#define HDR_FW_OPTIONS 0x08 /* 4 bytes option flags */
+#define FW_OPTION_TRACE_EN 0x00000001 /* FW tracing enabled */
+#define FW_OPTION_CONT_CLOCK 0x00000002 /* Continuous clocking supported */
+#define HDR_FW_SIZE 0x10 /* 4 bytes size for combo image */
+
+/* Boot Config: Address of Command/Status area */
+#define HDR_CMD_STAT_AREA 0x80 /* 4 bytes CF address */
+#define HDR_FW_CONTROL 0x84 /* 4 bytes control flags */
+#define FW_CONTROL_CONT_CLOCK 0x00000002 /* Continuous clocking enabled */
+#define FW_CONTROL_DUMMY_RD 0x00000004 /* Extra dummy read (AST2400) */
+#define FW_CONTROL_USE_STOP 0x00000008 /* Use STOP instructions */
+#define HDR_CLOCK_GPIO_VADDR 0x90 /* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_DADDR 0x92 /* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_VADDR 0x94 /* 2 bytes offset from GPIO base */
+#define HDR_DATA_GPIO_DADDR 0x96 /* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_VADDR 0x98 /* 2 bytes offset from GPIO base */
+#define HDR_TRANS_GPIO_DADDR 0x9a /* 2 bytes offset from GPIO base */
+#define HDR_CLOCK_GPIO_BIT 0x9c /* 1 byte bit number */
+#define HDR_DATA_GPIO_BIT 0x9d /* 1 byte bit number */
+#define HDR_TRANS_GPIO_BIT 0x9e /* 1 byte bit number */
+
+/*
+ * Command/Status area layout: Main part
+ */
+
+/* Command/Status register:
+ *
+ * +---------------------------+
+ * | STAT | RLEN | CLEN | CMD |
+ * | 8 | 8 | 8 | 8 |
+ * +---------------------------+
+ * | | | |
+ * status | | |
+ * Response len | |
+ * (in bits) | |
+ * | |
+ * Command len |
+ * (in bits) |
+ * |
+ * Command code
+ *
+ * Due to the big-endian layout, a single-byte read will
+ * return the status byte.
+ */
+#define CMD_STAT_REG 0x00
+#define CMD_REG_CMD_MASK 0x000000ff
+#define CMD_REG_CMD_SHIFT 0
+#define CMD_NONE 0x00
+#define CMD_COMMAND 0x01
+#define CMD_BREAK 0x02
+#define CMD_IDLE_CLOCKS 0x03 /* clen = #clocks */
+#define CMD_INVALID 0xff
+#define CMD_REG_CLEN_MASK 0x0000ff00
+#define CMD_REG_CLEN_SHIFT 8
+#define CMD_REG_RLEN_MASK 0x00ff0000
+#define CMD_REG_RLEN_SHIFT 16
+#define CMD_REG_STAT_MASK 0xff000000
+#define CMD_REG_STAT_SHIFT 24
+#define STAT_WORKING 0x00
+#define STAT_COMPLETE 0x01
+#define STAT_ERR_INVAL_CMD 0x80
+#define STAT_ERR_INVAL_IRQ 0x81
+#define STAT_ERR_MTOE 0x82
+
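Decoding the register is a plain mask-and-shift; a sketch, assuming "reg" holds a 32-bit read of CMD_STAT_REG:

	uint8_t cmd  = (reg & CMD_REG_CMD_MASK) >> CMD_REG_CMD_SHIFT;
	uint8_t clen = (reg & CMD_REG_CLEN_MASK) >> CMD_REG_CLEN_SHIFT;
	uint8_t rlen = (reg & CMD_REG_RLEN_MASK) >> CMD_REG_RLEN_SHIFT;
	uint8_t stat = (reg & CMD_REG_STAT_MASK) >> CMD_REG_STAT_SHIFT;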
+/* Response tag & CRC */
+#define STAT_RTAG 0x04
+
+/* Response CRC */
+#define STAT_RCRC 0x05
+
+/* Echo and Send delay */
+#define ECHO_DLY_REG 0x08
+#define SEND_DLY_REG 0x09
+
+/* Command data area
+ *
+ * Last byte of message must be left aligned
+ */
+#define CMD_DATA 0x10 /* 64 bit of data */
+
+/* Response data area, right aligned, unused top bits are 1 */
+#define RSP_DATA 0x20 /* 32 bit of data */
+
+/* Misc */
+#define INT_CNT 0x30 /* 32-bit interrupt count */
+#define BAD_INT_VEC 0x34 /* 32-bit bad interrupt vector # */
+#define CF_STARTED 0x38 /* byte, set to -1 when copro started */
+#define CLK_CNT 0x3c /* 32-bit, clock count (debug only) */
+
+/*
+ * SRAM layout: GPIO arbitration part
+ */
+#define ARB_REG 0x40
+#define ARB_ARM_REQ 0x01
+#define ARB_ARM_ACK 0x02
+
+/* Misc2 */
+#define CF_RESET_D0 0x50
+#define CF_RESET_D1 0x54
+#define BAD_INT_S0 0x58
+#define BAD_INT_S1 0x5c
+#define STOP_CNT 0x60
+
+/* Internal */
+
+/*
+ * SRAM layout: Trace buffer (debug builds only)
+ */
+#define TRACEBUF 0x100
+#define TR_CLKOBIT0 0xc0
+#define TR_CLKOBIT1 0xc1
+#define TR_CLKOSTART 0x82
+#define TR_OLEN 0x83 /* + len */
+#define TR_CLKZ 0x84 /* + count */
+#define TR_CLKWSTART 0x85
+#define TR_CLKTAG 0x86 /* + tag */
+#define TR_CLKDATA 0x87 /* + len */
+#define TR_CLKCRC 0x88 /* + raw crc */
+#define TR_CLKIBIT0 0x90
+#define TR_CLKIBIT1 0x91
+#define TR_END 0xff
+
+#endif /* __CF_FSI_FW_H */
+
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4c03d6933646..2c31563fdcae 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -11,6 +11,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * TODO:
+ * - Rework topology
+ * - s/chip_id/chip_loc
+ * - s/cfam/chip (cfam_id -> chip_id etc...)
*/
#include <linux/crc4.h>
@@ -21,6 +26,9 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
#include "fsi-master.h"
@@ -78,9 +86,15 @@ static DEFINE_IDA(master_ida);
struct fsi_slave {
struct device dev;
struct fsi_master *master;
- int id;
- int link;
+ struct cdev cdev;
+ int cdev_idx;
+ int id; /* FSI address */
+ int link; /* FSI link# */
+ u32 cfam_id;
+ int chip_id;
uint32_t size; /* size of slave address space */
+ u8 t_send_delay;
+ u8 t_echo_delay;
};
#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
@@ -89,6 +103,13 @@ struct fsi_slave {
static const int slave_retries = 2;
static int discard_errors;
+static dev_t fsi_base_dev;
+static DEFINE_IDA(fsi_minor_ida);
+#define FSI_CHAR_MAX_DEVICES 0x1000
+
+/* Legacy /dev numbering: 4 devices per chip, 16 chips */
+#define FSI_CHAR_LEGACY_TOP 64
+
static int fsi_master_read(struct fsi_master *master, int link,
uint8_t slave_id, uint32_t addr, void *val, size_t size);
static int fsi_master_write(struct fsi_master *master, int link,
@@ -190,7 +211,7 @@ static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
- uint32_t irq, stat;
+ __be32 irq, stat;
int rc, link;
uint8_t id;
@@ -215,7 +236,53 @@ static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
&irq, sizeof(irq));
}
-static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
+/* Encode slave local bus echo delay */
+static inline uint32_t fsi_smode_echodly(int x)
+{
+ return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
+}
+
+/* Encode slave local bus send delay */
+static inline uint32_t fsi_smode_senddly(int x)
+{
+ return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
+}
+
+/* Encode slave local bus clock rate ratio */
+static inline uint32_t fsi_smode_lbcrr(int x)
+{
+ return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
+}
+
+/* Encode slave ID */
+static inline uint32_t fsi_smode_sid(int x)
+{
+ return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
+}
+
+static uint32_t fsi_slave_smode(int id, u8 t_senddly, u8 t_echodly)
+{
+ return FSI_SMODE_WSC | FSI_SMODE_ECRC
+ | fsi_smode_sid(id)
+ | fsi_smode_echodly(t_echodly - 1) | fsi_smode_senddly(t_senddly - 1)
+ | fsi_smode_lbcrr(0x8);
+}
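Note that the delay fields are stored as the value minus one; for example, passing send/echo delays of 16 reproduces the fixed 0xf encoding used by the code removed further below:

	/* delays of 16 cycles encode as 0xf in smode, as before */
	uint32_t smode = fsi_slave_smode(slave->id, 16, 16);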
+
+static int fsi_slave_set_smode(struct fsi_slave *slave)
+{
+ uint32_t smode;
+ __be32 data;
+
+ /* set our smode register with the slave ID field to 0; this enables
+ * extended slave addressing
+ */
+ smode = fsi_slave_smode(slave->id, slave->t_send_delay, slave->t_echo_delay);
+ data = cpu_to_be32(smode);
+
+ return fsi_master_write(slave->master, slave->link, slave->id,
+ FSI_SLAVE_BASE + FSI_SMODE,
+ &data, sizeof(data));
+}
static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
uint32_t addr, size_t size)
@@ -223,7 +290,7 @@ static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
struct fsi_master *master = slave->master;
int rc, link;
uint32_t reg;
- uint8_t id;
+ uint8_t id, send_delay, echo_delay;
if (discard_errors)
return -1;
@@ -254,15 +321,26 @@ static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
}
}
+ send_delay = slave->t_send_delay;
+ echo_delay = slave->t_echo_delay;
+
/* getting serious, reset the slave via BREAK */
rc = fsi_master_break(master, link);
if (rc)
return rc;
- rc = fsi_slave_set_smode(master, link, id);
+ slave->t_send_delay = send_delay;
+ slave->t_echo_delay = echo_delay;
+
+ rc = fsi_slave_set_smode(slave);
if (rc)
return rc;
+ if (master->link_config)
+ master->link_config(master, link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
return fsi_slave_report_and_clear_errors(slave);
}
@@ -390,7 +468,6 @@ static struct device_node *fsi_device_find_of_node(struct fsi_device *dev)
static int fsi_slave_scan(struct fsi_slave *slave)
{
uint32_t engine_addr;
- uint32_t conf;
int rc, i;
/*
@@ -404,15 +481,17 @@ static int fsi_slave_scan(struct fsi_slave *slave)
for (i = 2; i < engine_page_size / sizeof(uint32_t); i++) {
uint8_t slots, version, type, crc;
struct fsi_device *dev;
+ uint32_t conf;
+ __be32 data;
- rc = fsi_slave_read(slave, (i + 1) * sizeof(conf),
- &conf, sizeof(conf));
+ rc = fsi_slave_read(slave, (i + 1) * sizeof(data),
+ &data, sizeof(data));
if (rc) {
dev_warn(&slave->dev,
"error reading slave registers\n");
return -1;
}
- conf = be32_to_cpu(conf);
+ conf = be32_to_cpu(data);
crc = crc4(0, conf, 32);
if (crc) {
@@ -539,79 +618,11 @@ static const struct bin_attribute fsi_slave_raw_attr = {
.write = fsi_slave_sysfs_raw_write,
};
-static ssize_t fsi_slave_sysfs_term_write(struct file *file,
- struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
- struct fsi_master *master = slave->master;
-
- if (!master->term)
- return -ENODEV;
-
- master->term(master, slave->link, slave->id);
- return count;
-}
-
-static const struct bin_attribute fsi_slave_term_attr = {
- .attr = {
- .name = "term",
- .mode = 0200,
- },
- .size = 0,
- .write = fsi_slave_sysfs_term_write,
-};
-
-/* Encode slave local bus echo delay */
-static inline uint32_t fsi_smode_echodly(int x)
-{
- return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
-}
-
-/* Encode slave local bus send delay */
-static inline uint32_t fsi_smode_senddly(int x)
-{
- return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
-}
-
-/* Encode slave local bus clock rate ratio */
-static inline uint32_t fsi_smode_lbcrr(int x)
-{
- return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
-}
-
-/* Encode slave ID */
-static inline uint32_t fsi_smode_sid(int x)
-{
- return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
-}
-
-static uint32_t fsi_slave_smode(int id)
-{
- return FSI_SMODE_WSC | FSI_SMODE_ECRC
- | fsi_smode_sid(id)
- | fsi_smode_echodly(0xf) | fsi_smode_senddly(0xf)
- | fsi_smode_lbcrr(0x8);
-}
-
-static int fsi_slave_set_smode(struct fsi_master *master, int link, int id)
-{
- uint32_t smode;
-
- /* set our smode register with the slave ID field to 0; this enables
- * extended slave addressing
- */
- smode = fsi_slave_smode(id);
- smode = cpu_to_be32(smode);
-
- return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SMODE,
- &smode, sizeof(smode));
-}
-
static void fsi_slave_release(struct device *dev)
{
struct fsi_slave *slave = to_fsi_slave(dev);
+ fsi_free_minor(slave->dev.devt);
of_node_put(dev->of_node);
kfree(slave);
}
@@ -659,11 +670,303 @@ static struct device_node *fsi_slave_find_of_node(struct fsi_master *master,
return NULL;
}
+static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
+ loff_t *offset)
+{
+ struct fsi_slave *slave = filep->private_data;
+ size_t total_len, read_len;
+ loff_t off = *offset;
+ ssize_t rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += read_len) {
+ __be32 data;
+
+		/* transfer at most one naturally-aligned word per iteration */
+		read_len = min_t(size_t, count - total_len, 4);
+		read_len -= off & 0x3;
+
+ rc = fsi_slave_read(slave, off, &data, read_len);
+ if (rc)
+ goto fail;
+ rc = copy_to_user(buf + total_len, &data, read_len);
+ if (rc) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ off += read_len;
+ }
+ rc = count;
+ fail:
+ *offset = off;
+	return rc;
+}
+
+static ssize_t cfam_write(struct file *filep, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct fsi_slave *slave = filep->private_data;
+ size_t total_len, write_len;
+ loff_t off = *offset;
+ ssize_t rc;
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
+ return -EINVAL;
+
+ for (total_len = 0; total_len < count; total_len += write_len) {
+ __be32 data;
+
+		/* transfer at most one naturally-aligned word per iteration */
+		write_len = min_t(size_t, count - total_len, 4);
+		write_len -= off & 0x3;
+
+ rc = copy_from_user(&data, buf + total_len, write_len);
+ if (rc) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ rc = fsi_slave_write(slave, off, &data, write_len);
+ if (rc)
+ goto fail;
+ off += write_len;
+ }
+ rc = count;
+ fail:
+ *offset = off;
+	return rc;
+}
+
+static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
+{
+	switch (whence) {
+	case SEEK_CUR:
+		offset += file->f_pos;
+		break;
+	case SEEK_SET:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (offset < 0)
+		return -EINVAL;
+	file->f_pos = offset;
+
+	return offset;
+}
+
+static int cfam_open(struct inode *inode, struct file *file)
+{
+ struct fsi_slave *slave = container_of(inode->i_cdev, struct fsi_slave, cdev);
+
+ file->private_data = slave;
+
+ return 0;
+}
+
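+/*
+ * Raw word-oriented access to the CFAM address space, exposed to userspace
+ * as /dev/cfamN (or /dev/fsi/cfamN with CONFIG_FSI_NEW_DEV_NODE).
+ */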
+static const struct file_operations cfam_fops = {
+ .owner = THIS_MODULE,
+ .open = cfam_open,
+ .llseek = cfam_llseek,
+ .read = cfam_read,
+ .write = cfam_write,
+};
+
+static ssize_t send_term_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+ struct fsi_master *master = slave->master;
+
+ if (!master->term)
+ return -ENODEV;
+
+ master->term(master, slave->link, slave->id);
+ return count;
+}
+
+static DEVICE_ATTR_WO(send_term);
+
+static ssize_t slave_send_echo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "%u\n", slave->t_send_delay);
+}
+
+static ssize_t slave_send_echo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+ struct fsi_master *master = slave->master;
+ unsigned long val;
+ int rc;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val < 1 || val > 16)
+ return -EINVAL;
+
+ if (!master->link_config)
+ return -ENXIO;
+
+ /* Current HW mandates that send and echo delay are identical */
+ slave->t_send_delay = val;
+ slave->t_echo_delay = val;
+
+ rc = fsi_slave_set_smode(slave);
+ if (rc < 0)
+ return rc;
+ if (master->link_config)
+ master->link_config(master, slave->link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
+ return count;
+}
+
+static DEVICE_ATTR(send_echo_delays, 0600,
+ slave_send_echo_show, slave_send_echo_store);
+
+static ssize_t chip_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "%d\n", slave->chip_id);
+}
+
+static DEVICE_ATTR_RO(chip_id);
+
+static ssize_t cfam_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+ return sprintf(buf, "0x%x\n", slave->cfam_id);
+}
+
+static DEVICE_ATTR_RO(cfam_id);
+
+static struct attribute *cfam_attr[] = {
+ &dev_attr_send_echo_delays.attr,
+ &dev_attr_chip_id.attr,
+ &dev_attr_cfam_id.attr,
+ &dev_attr_send_term.attr,
+ NULL,
+};
+
+static const struct attribute_group cfam_attr_group = {
+ .attrs = cfam_attr,
+};
+
+static const struct attribute_group *cfam_attr_groups[] = {
+ &cfam_attr_group,
+ NULL,
+};
+
+static char *cfam_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx);
+#else
+ return kasprintf(GFP_KERNEL, "cfam%d", slave->cdev_idx);
+#endif
+}
+
+static const struct device_type cfam_type = {
+ .name = "cfam",
+ .devnode = cfam_devnode,
+ .groups = cfam_attr_groups
+};
+
+static char *fsi_cdev_devnode(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return kasprintf(GFP_KERNEL, "fsi/%s", dev_name(dev));
+#else
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+#endif
+}
+
+const struct device_type fsi_cdev_type = {
+ .name = "fsi-cdev",
+ .devnode = fsi_cdev_devnode,
+};
+EXPORT_SYMBOL_GPL(fsi_cdev_type);
+
+/* Backward compatible /dev/ numbering in "old style" mode */
+static int fsi_adjust_index(int index)
+{
+#ifdef CONFIG_FSI_NEW_DEV_NODE
+ return index;
+#else
+ return index + 1;
+#endif
+}
+
+static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index)
+{
+ int cid = slave->chip_id;
+ int id;
+
+ /* Check if we qualify for legacy numbering */
+ if (cid >= 0 && cid < 16 && type < 4) {
+ /* Try reserving the legacy number */
+ id = (cid << 4) | type;
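+		/* e.g. chip 2, type 1 claims legacy minor (2 << 4) | 1 = 0x21 */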
+ id = ida_simple_get(&fsi_minor_ida, id, id + 1, GFP_KERNEL);
+ if (id >= 0) {
+ *out_index = fsi_adjust_index(cid);
+ *out_dev = fsi_base_dev + id;
+ return 0;
+ }
+ /* Other failure */
+ if (id != -ENOSPC)
+ return id;
+ /* Fallback to non-legacy allocation */
+ }
+ id = ida_simple_get(&fsi_minor_ida, FSI_CHAR_LEGACY_TOP,
+ FSI_CHAR_MAX_DEVICES, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ *out_index = fsi_adjust_index(id);
+ *out_dev = fsi_base_dev + id;
+ return 0;
+}
+
+int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index)
+{
+ return __fsi_get_new_minor(fdev->slave, type, out_dev, out_index);
+}
+EXPORT_SYMBOL_GPL(fsi_get_new_minor);
+
+void fsi_free_minor(dev_t dev)
+{
+ ida_simple_remove(&fsi_minor_ida, MINOR(dev));
+}
+EXPORT_SYMBOL_GPL(fsi_free_minor);
+
static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
{
- uint32_t chip_id, llmode;
+ uint32_t cfam_id;
struct fsi_slave *slave;
uint8_t crc;
+ __be32 data, llmode;
int rc;
/* Currently, we only support single slaves on a link, and use the
@@ -672,31 +975,23 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
if (id != 0)
return -EINVAL;
- rc = fsi_master_read(master, link, id, 0, &chip_id, sizeof(chip_id));
+ rc = fsi_master_read(master, link, id, 0, &data, sizeof(data));
if (rc) {
dev_dbg(&master->dev, "can't read slave %02x:%02x %d\n",
link, id, rc);
return -ENODEV;
}
- chip_id = be32_to_cpu(chip_id);
+ cfam_id = be32_to_cpu(data);
- crc = crc4(0, chip_id, 32);
+ crc = crc4(0, cfam_id, 32);
if (crc) {
- dev_warn(&master->dev, "slave %02x:%02x invalid chip id CRC!\n",
+ dev_warn(&master->dev, "slave %02x:%02x invalid cfam id CRC!\n",
link, id);
return -EIO;
}
dev_dbg(&master->dev, "fsi: found chip %08x at %02x:%02x:%02x\n",
- chip_id, master->idx, link, id);
-
- rc = fsi_slave_set_smode(master, link, id);
- if (rc) {
- dev_warn(&master->dev,
- "can't set smode on slave:%02x:%02x %d\n",
- link, id, rc);
- return -ENODEV;
- }
+ cfam_id, master->idx, link, id);
/* If we're behind a master that doesn't provide a self-running bus
* clock, put the slave into async mode
@@ -719,30 +1014,61 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
if (!slave)
return -ENOMEM;
- slave->master = master;
+ dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
+ slave->dev.type = &cfam_type;
slave->dev.parent = &master->dev;
slave->dev.of_node = fsi_slave_find_of_node(master, link, id);
slave->dev.release = fsi_slave_release;
+ device_initialize(&slave->dev);
+ slave->cfam_id = cfam_id;
+ slave->master = master;
slave->link = link;
slave->id = id;
slave->size = FSI_SLAVE_SIZE_23b;
+ slave->t_send_delay = 16;
+ slave->t_echo_delay = 16;
+
+ /* Get chip ID if any */
+ slave->chip_id = -1;
+ if (slave->dev.of_node) {
+ uint32_t prop;
+ if (!of_property_read_u32(slave->dev.of_node, "chip-id", &prop))
+ slave->chip_id = prop;
- dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
- rc = device_register(&slave->dev);
- if (rc < 0) {
- dev_warn(&master->dev, "failed to create slave device: %d\n",
- rc);
- put_device(&slave->dev);
- return rc;
}
+ /* Allocate a minor in the FSI space */
+ rc = __fsi_get_new_minor(slave, fsi_dev_cfam, &slave->dev.devt,
+ &slave->cdev_idx);
+ if (rc)
+ goto err_free;
+
+ /* Create chardev for userspace access */
+ cdev_init(&slave->cdev, &cfam_fops);
+ rc = cdev_device_add(&slave->cdev, &slave->dev);
+ if (rc) {
+ dev_err(&slave->dev, "Error %d creating slave device\n", rc);
+ goto err_free;
+ }
+
+	rc = fsi_slave_set_smode(slave);
+	if (rc) {
+		dev_warn(&master->dev,
+			"can't set smode on slave:%02x:%02x %d\n",
+			link, id, rc);
+		/* the cdev is live by now; tear it down before dropping our ref */
+		cdev_device_del(&slave->cdev, &slave->dev);
+		rc = -ENODEV;
+		goto err_free;
+	}
+ if (master->link_config)
+ master->link_config(master, link,
+ slave->t_send_delay,
+ slave->t_echo_delay);
+
+ /* Legacy raw file -> to be removed */
rc = device_create_bin_file(&slave->dev, &fsi_slave_raw_attr);
if (rc)
dev_warn(&slave->dev, "failed to create raw attr: %d\n", rc);
- rc = device_create_bin_file(&slave->dev, &fsi_slave_term_attr);
- if (rc)
- dev_warn(&slave->dev, "failed to create term attr: %d\n", rc);
rc = fsi_slave_scan(slave);
if (rc)
@@ -750,6 +1076,10 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
rc);
return rc;
+
+ err_free:
+ put_device(&slave->dev);
+ return rc;
}
/* FSI master support */
@@ -814,12 +1144,16 @@ static int fsi_master_link_enable(struct fsi_master *master, int link)
*/
static int fsi_master_break(struct fsi_master *master, int link)
{
+ int rc = 0;
+
trace_fsi_master_break(master, link);
if (master->send_break)
- return master->send_break(master, link);
+ rc = master->send_break(master, link);
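+	/* Return the link to the default 16-clock send/echo delays */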
+ if (master->link_config)
+ master->link_config(master, link, 16, 16);
- return 0;
+ return rc;
}
static int fsi_master_scan(struct fsi_master *master)
@@ -854,8 +1188,11 @@ static int fsi_slave_remove_device(struct device *dev, void *arg)
static int fsi_master_remove_slave(struct device *dev, void *arg)
{
+ struct fsi_slave *slave = to_fsi_slave(dev);
+
device_for_each_child(dev, NULL, fsi_slave_remove_device);
- device_unregister(dev);
+ cdev_device_del(&slave->cdev, &slave->dev);
+ put_device(dev);
return 0;
}
@@ -866,8 +1203,14 @@ static void fsi_master_unscan(struct fsi_master *master)
int fsi_master_rescan(struct fsi_master *master)
{
+ int rc;
+
+ mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
- return fsi_master_scan(master);
+ rc = fsi_master_scan(master);
+ mutex_unlock(&master->scan_lock);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(fsi_master_rescan);
@@ -903,9 +1246,7 @@ int fsi_master_register(struct fsi_master *master)
int rc;
struct device_node *np;
- if (!master)
- return -EINVAL;
-
+ mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
dev_set_name(&master->dev, "fsi%d", master->idx);
@@ -917,21 +1258,24 @@ int fsi_master_register(struct fsi_master *master)
rc = device_create_file(&master->dev, &dev_attr_rescan);
if (rc) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
ida_simple_remove(&master_ida, master->idx);
return rc;
}
rc = device_create_file(&master->dev, &dev_attr_break);
if (rc) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
ida_simple_remove(&master_ida, master->idx);
return rc;
}
np = dev_of_node(&master->dev);
- if (!of_property_read_bool(np, "no-scan-on-init"))
+ if (!of_property_read_bool(np, "no-scan-on-init")) {
+ mutex_lock(&master->scan_lock);
fsi_master_scan(master);
+ mutex_unlock(&master->scan_lock);
+ }
return 0;
}
@@ -944,7 +1288,9 @@ void fsi_master_unregister(struct fsi_master *master)
master->idx = -1;
}
+ mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
+ mutex_unlock(&master->scan_lock);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(fsi_master_unregister);
@@ -996,13 +1342,27 @@ EXPORT_SYMBOL_GPL(fsi_bus_type);
static int __init fsi_init(void)
{
- return bus_register(&fsi_bus_type);
+ int rc;
+
+ rc = alloc_chrdev_region(&fsi_base_dev, 0, FSI_CHAR_MAX_DEVICES, "fsi");
+ if (rc)
+ return rc;
+ rc = bus_register(&fsi_bus_type);
+ if (rc)
+ goto fail_bus;
+ return 0;
+
+ fail_bus:
+ unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+ return rc;
}
postcore_initcall(fsi_init);
static void fsi_exit(void)
{
bus_unregister(&fsi_bus_type);
+ unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
+ ida_destroy(&fsi_minor_ida);
}
module_exit(fsi_exit);
module_param(discard_errors, int, 0664);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
new file mode 100644
index 000000000000..04d10ea8d343
--- /dev/null
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -0,0 +1,1440 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corp
+/*
+ * An FSI master controller that offloads the FSI protocol bit-banging
+ * to a ColdFire coprocessor embedded in the Aspeed BMC SoC
+ */
+
+#include <linux/crc4.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/firmware.h>
+#include <linux/gpio/aspeed.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/genalloc.h>
+
+#include "fsi-master.h"
+#include "cf-fsi-fw.h"
+
+#define FW_FILE_NAME "cf-fsi-fw.bin"
+
+/* Common SCU based coprocessor control registers */
+#define SCU_COPRO_CTRL 0x100
+#define SCU_COPRO_RESET 0x00000002
+#define SCU_COPRO_CLK_EN 0x00000001
+
+/* AST2500 specific ones */
+#define SCU_2500_COPRO_SEG0 0x104
+#define SCU_2500_COPRO_SEG1 0x108
+#define SCU_2500_COPRO_SEG2 0x10c
+#define SCU_2500_COPRO_SEG3 0x110
+#define SCU_2500_COPRO_SEG4 0x114
+#define SCU_2500_COPRO_SEG5 0x118
+#define SCU_2500_COPRO_SEG6 0x11c
+#define SCU_2500_COPRO_SEG7 0x120
+#define SCU_2500_COPRO_SEG8 0x124
+#define SCU_2500_COPRO_SEG_SWAP 0x00000001
+#define SCU_2500_COPRO_CACHE_CTL 0x128
+#define SCU_2500_COPRO_CACHE_EN 0x00000001
+#define SCU_2500_COPRO_SEG0_CACHE_EN 0x00000002
+#define SCU_2500_COPRO_SEG1_CACHE_EN 0x00000004
+#define SCU_2500_COPRO_SEG2_CACHE_EN 0x00000008
+#define SCU_2500_COPRO_SEG3_CACHE_EN 0x00000010
+#define SCU_2500_COPRO_SEG4_CACHE_EN 0x00000020
+#define SCU_2500_COPRO_SEG5_CACHE_EN 0x00000040
+#define SCU_2500_COPRO_SEG6_CACHE_EN 0x00000080
+#define SCU_2500_COPRO_SEG7_CACHE_EN 0x00000100
+#define SCU_2500_COPRO_SEG8_CACHE_EN 0x00000200
+
+#define SCU_2400_COPRO_SEG0 0x104
+#define SCU_2400_COPRO_SEG2 0x108
+#define SCU_2400_COPRO_SEG4 0x10c
+#define SCU_2400_COPRO_SEG6 0x110
+#define SCU_2400_COPRO_SEG8 0x114
+#define SCU_2400_COPRO_SEG_SWAP 0x80000000
+#define SCU_2400_COPRO_CACHE_CTL 0x118
+#define SCU_2400_COPRO_CACHE_EN 0x00000001
+#define SCU_2400_COPRO_SEG0_CACHE_EN 0x00000002
+#define SCU_2400_COPRO_SEG2_CACHE_EN 0x00000004
+#define SCU_2400_COPRO_SEG4_CACHE_EN 0x00000008
+#define SCU_2400_COPRO_SEG6_CACHE_EN 0x00000010
+#define SCU_2400_COPRO_SEG8_CACHE_EN 0x00000020
+
+/* CVIC registers */
+#define CVIC_EN_REG 0x10
+#define CVIC_TRIG_REG 0x18
+
+/*
+ * System register base address (needed for configuring the
+ * ColdFire maps)
+ */
+#define SYSREG_BASE 0x1e600000
+
+/* Amount of SRAM required */
+#define SRAM_SIZE 0x1000
+
+#define LAST_ADDR_INVALID 0x1
+
+struct fsi_master_acf {
+ struct fsi_master master;
+ struct device *dev;
+ struct regmap *scu;
+ struct mutex lock; /* mutex for command ordering */
+ struct gpio_desc *gpio_clk;
+ struct gpio_desc *gpio_data;
+ struct gpio_desc *gpio_trans; /* Voltage translator */
+ struct gpio_desc *gpio_enable; /* FSI enable */
+ struct gpio_desc *gpio_mux; /* Mux control */
+ uint16_t gpio_clk_vreg;
+ uint16_t gpio_clk_dreg;
+ uint16_t gpio_dat_vreg;
+ uint16_t gpio_dat_dreg;
+ uint16_t gpio_tra_vreg;
+ uint16_t gpio_tra_dreg;
+ uint8_t gpio_clk_bit;
+ uint8_t gpio_dat_bit;
+ uint8_t gpio_tra_bit;
+ uint32_t cf_mem_addr;
+ size_t cf_mem_size;
+ void __iomem *cf_mem;
+ void __iomem *cvic;
+ struct gen_pool *sram_pool;
+ void __iomem *sram;
+ bool is_ast2500;
+ bool external_mode;
+ bool trace_enabled;
+ uint32_t last_addr;
+ uint8_t t_send_delay;
+ uint8_t t_echo_delay;
+ uint32_t cvic_sw_irq;
+};
+#define to_fsi_master_acf(m) container_of(m, struct fsi_master_acf, master)
+
+struct fsi_msg {
+ uint64_t msg;
+ uint8_t bits;
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_ast_cf.h>
+
+static void msg_push_bits(struct fsi_msg *msg, uint64_t data, int bits)
+{
+ msg->msg <<= bits;
+ msg->msg |= data & ((1ull << bits) - 1);
+ msg->bits += bits;
+}
+
+static void msg_push_crc(struct fsi_msg *msg)
+{
+ uint8_t crc;
+ int top;
+
+ top = msg->bits & 0x3;
+
+ /* start bit, and any non-aligned top bits */
+ crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
+
+ /* aligned bits */
+ crc = crc4(crc, msg->msg, msg->bits - top);
+
+ msg_push_bits(msg, crc, 4);
+}
+
+static void msg_finish_cmd(struct fsi_msg *cmd)
+{
+ /* Left align message */
+ cmd->msg <<= (64 - cmd->bits);
+}
+
+static bool check_same_address(struct fsi_master_acf *master, int id,
+ uint32_t addr)
+{
+ /* this will also handle LAST_ADDR_INVALID */
+ return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_acf *master, int id,
+ uint32_t addr, uint32_t *rel_addrp)
+{
+ uint32_t last_addr = master->last_addr;
+ int32_t rel_addr;
+
+ if (last_addr == LAST_ADDR_INVALID)
+ return false;
+
+ /* We may be in 23-bit addressing mode, which uses the id as the
+ * top two address bits. So, if we're referencing a different ID,
+ * use absolute addresses.
+ */
+ if (((last_addr >> 21) & 0x3) != id)
+ return false;
+
+ /* remove the top two bits from any 23-bit addressing */
+ last_addr &= (1 << 21) - 1;
+
+ /* We know that the addresses are limited to 21 bits, so this won't
+ * overflow the signed rel_addr */
+ rel_addr = addr - last_addr;
+ if (rel_addr > 255 || rel_addr < -256)
+ return false;
+
+ *rel_addrp = (uint32_t)rel_addr;
+
+ return true;
+}
+
+static void last_address_update(struct fsi_master_acf *master,
+ int id, bool valid, uint32_t addr)
+{
+ if (!valid)
+ master->last_addr = LAST_ADDR_INVALID;
+ else
+ master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
+
+/*
+ * Encode an Absolute/Relative/Same Address command
+ */
+static void build_ar_command(struct fsi_master_acf *master,
+ struct fsi_msg *cmd, uint8_t id,
+ uint32_t addr, size_t size,
+ const void *data)
+{
+ int i, addr_bits, opcode_bits;
+ bool write = !!data;
+ uint8_t ds, opcode;
+ uint32_t rel_addr;
+
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ /* we have 21 bits of address max */
+ addr &= ((1 << 21) - 1);
+
+ /* cmd opcodes are variable length - SAME_AR is only two bits */
+ opcode_bits = 3;
+
+ if (check_same_address(master, id, addr)) {
+ /* we still address the byte offset within the word */
+ addr_bits = 2;
+ opcode_bits = 2;
+ opcode = FSI_CMD_SAME_AR;
+ trace_fsi_master_acf_cmd_same_addr(master);
+
+ } else if (check_relative_address(master, id, addr, &rel_addr)) {
+ /* 8 bits plus sign */
+ addr_bits = 9;
+ addr = rel_addr;
+ opcode = FSI_CMD_REL_AR;
+ trace_fsi_master_acf_cmd_rel_addr(master, rel_addr);
+
+ } else {
+ addr_bits = 21;
+ opcode = FSI_CMD_ABS_AR;
+ trace_fsi_master_acf_cmd_abs_addr(master, addr);
+ }
+
+ /*
+ * The read/write size is encoded in the lower bits of the address
+ * (as it must be naturally-aligned), and the following ds bit.
+ *
+ * size addr:1 addr:0 ds
+ * 1 x x 0
+ * 2 x 0 1
+ * 4 0 1 1
+ *
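+	 * e.g. a 4-byte access at address 0x1028 is encoded as addr 0x1029
+	 * with ds = 1.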
+ */
+ ds = size > 1 ? 1 : 0;
+ addr &= ~(size - 1);
+ if (size == 4)
+ addr |= 1;
+
+ msg_push_bits(cmd, id, 2);
+ msg_push_bits(cmd, opcode, opcode_bits);
+ msg_push_bits(cmd, write ? 0 : 1, 1);
+ msg_push_bits(cmd, addr, addr_bits);
+ msg_push_bits(cmd, ds, 1);
+ for (i = 0; write && i < size; i++)
+ msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
+
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_dpoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_epoll_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static void build_term_command(struct fsi_msg *cmd, uint8_t slave_id)
+{
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_TERM, 6);
+ msg_push_crc(cmd);
+ msg_finish_cmd(cmd);
+}
+
+static int do_copro_command(struct fsi_master_acf *master, uint32_t op)
+{
+ uint32_t timeout = 10000000;
+ uint8_t stat;
+
+ trace_fsi_master_acf_copro_command(master, op);
+
+ /* Send command */
+ iowrite32be(op, master->sram + CMD_STAT_REG);
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ /* Wait for status to indicate completion (or error) */
+ do {
+ if (timeout-- == 0) {
+ dev_warn(master->dev,
+ "Timeout waiting for coprocessor completion\n");
+ return -ETIMEDOUT;
+ }
+ stat = ioread8(master->sram + CMD_STAT_REG);
+	} while (stat < STAT_COMPLETE || stat == 0xff);
+
+	if (stat == STAT_COMPLETE)
+		return 0;
+	switch (stat) {
+ case STAT_ERR_INVAL_CMD:
+ return -EINVAL;
+ case STAT_ERR_INVAL_IRQ:
+ return -EIO;
+ case STAT_ERR_MTOE:
+ return -ESHUTDOWN;
+ }
+ return -ENXIO;
+}
+
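+/*
+ * The copro takes the clock count in a small command-register field,
+ * so clock out long runs of zeros in chunks of at most 255.
+ */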
+static int clock_zeros(struct fsi_master_acf *master, int count)
+{
+ while (count) {
+ int rc, lcnt = min(count, 255);
+
+ rc = do_copro_command(master,
+ CMD_IDLE_CLOCKS | (lcnt << CMD_REG_CLEN_SHIFT));
+ if (rc)
+ return rc;
+ count -= lcnt;
+ }
+ return 0;
+}
+
+static int send_request(struct fsi_master_acf *master, struct fsi_msg *cmd,
+ unsigned int resp_bits)
+{
+ uint32_t op;
+
+ trace_fsi_master_acf_send_request(master, cmd, resp_bits);
+
+ /* Store message into SRAM */
+ iowrite32be((cmd->msg >> 32), master->sram + CMD_DATA);
+ iowrite32be((cmd->msg & 0xffffffff), master->sram + CMD_DATA + 4);
+
+ op = CMD_COMMAND;
+ op |= cmd->bits << CMD_REG_CLEN_SHIFT;
+ if (resp_bits)
+ op |= resp_bits << CMD_REG_RLEN_SHIFT;
+
+ return do_copro_command(master, op);
+}
+
+static int read_copro_response(struct fsi_master_acf *master, uint8_t size,
+ uint32_t *response, u8 *tag)
+{
+ uint8_t rtag = ioread8(master->sram + STAT_RTAG) & 0xf;
+ uint8_t rcrc = ioread8(master->sram + STAT_RCRC) & 0xf;
+ uint32_t rdata = 0;
+ uint32_t crc;
+ uint8_t ack;
+
+ *tag = ack = rtag & 3;
+
+ /* we have a whole message now; check CRC */
+ crc = crc4(0, 1, 1);
+ crc = crc4(crc, rtag, 4);
+ if (ack == FSI_RESP_ACK && size) {
+ rdata = ioread32be(master->sram + RSP_DATA);
+ crc = crc4(crc, rdata, size);
+ if (response)
+ *response = rdata;
+ }
+ crc = crc4(crc, rcrc, 4);
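+	/* an intact frame leaves a zero residual over start bit, tag, data, CRC */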
+
+ trace_fsi_master_acf_copro_response(master, rtag, rcrc, rdata, crc == 0);
+
+ if (crc) {
+ /*
+ * Check if it's all 1's or all 0's, that probably means
+ * the host is off
+ */
+ if ((rtag == 0xf && rcrc == 0xf) || (rtag == 0 && rcrc == 0))
+ return -ENODEV;
+ dev_dbg(master->dev, "Bad response CRC !\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int send_term(struct fsi_master_acf *master, uint8_t slave)
+{
+ struct fsi_msg cmd;
+ uint8_t tag;
+ int rc;
+
+ build_term_command(&cmd, slave);
+
+ rc = send_request(master, &cmd, 0);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending term\n", rc);
+ return rc;
+ }
+
+ rc = read_copro_response(master, 0, NULL, &tag);
+ if (rc < 0) {
+ dev_err(master->dev,
+ "TERM failed; lost communication with slave\n");
+ return -EIO;
+ } else if (tag != FSI_RESP_ACK) {
+ dev_err(master->dev, "TERM failed; response %d\n", tag);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void dump_ucode_trace(struct fsi_master_acf *master)
+{
+ char trbuf[52];
+ char *p;
+ int i;
+
+ dev_dbg(master->dev,
+ "CMDSTAT:%08x RTAG=%02x RCRC=%02x RDATA=%02x #INT=%08x\n",
+ ioread32be(master->sram + CMD_STAT_REG),
+ ioread8(master->sram + STAT_RTAG),
+ ioread8(master->sram + STAT_RCRC),
+ ioread32be(master->sram + RSP_DATA),
+ ioread32be(master->sram + INT_CNT));
+
+ for (i = 0; i < 512; i++) {
+ uint8_t v;
+ if ((i % 16) == 0)
+ p = trbuf;
+ v = ioread8(master->sram + TRACEBUF + i);
+ p += sprintf(p, "%02x ", v);
+ if (((i % 16) == 15) || v == TR_END)
+ dev_dbg(master->dev, "%s\n", trbuf);
+ if (v == TR_END)
+ break;
+ }
+}
+
+static int handle_response(struct fsi_master_acf *master,
+ uint8_t slave, uint8_t size, void *data)
+{
+ int busy_count = 0, rc;
+ int crc_err_retries = 0;
+ struct fsi_msg cmd;
+ uint32_t response;
+ uint8_t tag;
+retry:
+ rc = read_copro_response(master, size, &response, &tag);
+
+ /* Handle retries on CRC errors */
+ if (rc == -EAGAIN) {
+ /* Too many retries ? */
+ if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+ /*
+ * Pass it up as a -EIO otherwise upper level will retry
+ * the whole command which isn't what we want here.
+ */
+ rc = -EIO;
+ goto bail;
+ }
+ trace_fsi_master_acf_crc_rsp_error(master, crc_err_retries);
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+ if (rc) {
+ dev_warn(master->dev,
+ "Error %d clocking zeros for E_POLL\n", rc);
+ return rc;
+ }
+ build_epoll_command(&cmd, slave);
+ rc = send_request(master, &cmd, size);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending E_POLL\n", rc);
+ return -EIO;
+ }
+ goto retry;
+ }
+ if (rc)
+ return rc;
+
+ switch (tag) {
+ case FSI_RESP_ACK:
+ if (size && data) {
+ if (size == 32)
+ *(__be32 *)data = cpu_to_be32(response);
+ else if (size == 16)
+ *(__be16 *)data = cpu_to_be16(response);
+ else
+ *(u8 *)data = response;
+ }
+ break;
+ case FSI_RESP_BUSY:
+ /*
+ * Its necessary to clock slave before issuing
+		 * It's necessary to clock the slave before issuing a
+		 * d-poll; this is not indicated in the hardware protocol
+		 * spec. Fewer than 20 clocks cause the slave to hang,
+		 * 21 is OK.
+ dev_dbg(master->dev, "Busy, retrying...\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+ if (rc) {
+ dev_warn(master->dev,
+ "Error %d clocking zeros for D_POLL\n", rc);
+ break;
+ }
+ if (busy_count++ < FSI_MASTER_MAX_BUSY) {
+ build_dpoll_command(&cmd, slave);
+ rc = send_request(master, &cmd, size);
+ if (rc) {
+ dev_warn(master->dev, "Error %d sending D_POLL\n", rc);
+ break;
+ }
+ goto retry;
+ }
+ dev_dbg(master->dev,
+ "ERR slave is stuck in busy state, issuing TERM\n");
+ send_term(master, slave);
+ rc = -EIO;
+ break;
+
+ case FSI_RESP_ERRA:
+ dev_dbg(master->dev, "ERRA received\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = -EIO;
+ break;
+ case FSI_RESP_ERRC:
+ dev_dbg(master->dev, "ERRC received\n");
+ if (master->trace_enabled)
+ dump_ucode_trace(master);
+ rc = -EAGAIN;
+ break;
+ }
+ bail:
+ if (busy_count > 0) {
+ trace_fsi_master_acf_poll_response_busy(master, busy_count);
+ }
+
+ return rc;
+}
+
+static int fsi_master_acf_xfer(struct fsi_master_acf *master, uint8_t slave,
+ struct fsi_msg *cmd, size_t resp_len, void *resp)
+{
+ int rc = -EAGAIN, retries = 0;
+
+ resp_len <<= 3;
+ while ((retries++) < FSI_CRC_ERR_RETRIES) {
+ rc = send_request(master, cmd, resp_len);
+ if (rc) {
+ if (rc != -ESHUTDOWN)
+ dev_warn(master->dev, "Error %d sending command\n", rc);
+ break;
+ }
+ rc = handle_response(master, slave, resp_len, resp);
+ if (rc != -EAGAIN)
+ break;
+ rc = -EIO;
+ dev_dbg(master->dev, "ECRC retry %d\n", retries);
+
+ /* Pace it a bit before retry */
+ msleep(1);
+ }
+
+ return rc;
+}
+
+static int fsi_master_acf_read(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, void *val,
+ size_t size)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ dev_dbg(master->dev, "read id %d addr %x size %zd\n", id, addr, size);
+ build_ar_command(master, &cmd, id, addr, size, NULL);
+ rc = fsi_master_acf_xfer(master, id, &cmd, size, val);
+ last_address_update(master, id, rc == 0, addr);
+ if (rc)
+ dev_dbg(master->dev, "read id %d addr 0x%08x err: %d\n",
+ id, addr, rc);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_write(struct fsi_master *_master, int link,
+ uint8_t id, uint32_t addr, const void *val,
+ size_t size)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ build_ar_command(master, &cmd, id, addr, size, val);
+ dev_dbg(master->dev, "write id %d addr %x size %zd raw_data: %08x\n",
+ id, addr, size, *(uint32_t *)val);
+ rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, rc == 0, addr);
+ if (rc)
+ dev_dbg(master->dev, "write id %d addr 0x%08x err: %d\n",
+ id, addr, rc);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_term(struct fsi_master *_master,
+ int link, uint8_t id)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ struct fsi_msg cmd;
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ build_term_command(&cmd, id);
+ dev_dbg(master->dev, "term id %d\n", id);
+ rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, false, 0);
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_break(struct fsi_master *_master, int link)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ int rc;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ if (master->external_mode) {
+ mutex_unlock(&master->lock);
+ return -EBUSY;
+ }
+ dev_dbg(master->dev, "sending BREAK\n");
+ rc = do_copro_command(master, CMD_BREAK);
+ last_address_update(master, 0, false, 0);
+ mutex_unlock(&master->lock);
+
+ /* Wait for logic reset to take effect */
+ udelay(200);
+
+ return rc;
+}
+
+static void reset_cf(struct fsi_master_acf *master)
+{
+ regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_RESET);
+	usleep_range(20, 20);
+	regmap_write(master->scu, SCU_COPRO_CTRL, 0);
+	usleep_range(20, 20);
+}
+
+static void start_cf(struct fsi_master_acf *master)
+{
+ regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_CLK_EN);
+}
+
+static void setup_ast2500_cf_maps(struct fsi_master_acf *master)
+{
+ /*
+ * Note about byteswap setting: the bus is wired backwards,
+ * so setting the byteswap bit actually makes the ColdFire
+ * work "normally" for a BE processor, ie, put the MSB in
+ * the lowest address byte.
+ *
+ * We thus need to set the bit for our main memory which
+ * contains our program code. We create two mappings for
+ * the register, one with each setting.
+ *
+	 * Segments 2 and 3 have a "swapped" mapping (BE)
+ * and 6 and 7 have a non-swapped mapping (LE) which allows
+ * us to avoid byteswapping register accesses since the
+ * registers are all LE.
+ */
+
+ /* Setup segment 0 to our memory region */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG0, master->cf_mem_addr |
+ SCU_2500_COPRO_SEG_SWAP);
+
+ /* Segments 2 and 3 to sysregs with byteswap (for SRAM) */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG2, SYSREG_BASE |
+ SCU_2500_COPRO_SEG_SWAP);
+ regmap_write(master->scu, SCU_2500_COPRO_SEG3, SYSREG_BASE | 0x100000 |
+ SCU_2500_COPRO_SEG_SWAP);
+
+ /* And segment 6 and 7 to sysregs no byteswap */
+ regmap_write(master->scu, SCU_2500_COPRO_SEG6, SYSREG_BASE);
+ regmap_write(master->scu, SCU_2500_COPRO_SEG7, SYSREG_BASE | 0x100000);
+
+ /* Memory cachable, regs and SRAM not cachable */
+ regmap_write(master->scu, SCU_2500_COPRO_CACHE_CTL,
+ SCU_2500_COPRO_SEG0_CACHE_EN | SCU_2500_COPRO_CACHE_EN);
+}
+
+static void setup_ast2400_cf_maps(struct fsi_master_acf *master)
+{
+ /* Setup segment 0 to our memory region */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG0, master->cf_mem_addr |
+ SCU_2400_COPRO_SEG_SWAP);
+
+	/* Segment 2 to sysregs with byteswap (for SRAM) */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG2, SYSREG_BASE |
+ SCU_2400_COPRO_SEG_SWAP);
+
+ /* And segment 6 to sysregs no byteswap */
+ regmap_write(master->scu, SCU_2400_COPRO_SEG6, SYSREG_BASE);
+
+ /* Memory cachable, regs and SRAM not cachable */
+ regmap_write(master->scu, SCU_2400_COPRO_CACHE_CTL,
+ SCU_2400_COPRO_SEG0_CACHE_EN | SCU_2400_COPRO_CACHE_EN);
+}
+
+static void setup_common_fw_config(struct fsi_master_acf *master,
+ void __iomem *base)
+{
+ iowrite16be(master->gpio_clk_vreg, base + HDR_CLOCK_GPIO_VADDR);
+ iowrite16be(master->gpio_clk_dreg, base + HDR_CLOCK_GPIO_DADDR);
+ iowrite16be(master->gpio_dat_vreg, base + HDR_DATA_GPIO_VADDR);
+ iowrite16be(master->gpio_dat_dreg, base + HDR_DATA_GPIO_DADDR);
+ iowrite16be(master->gpio_tra_vreg, base + HDR_TRANS_GPIO_VADDR);
+ iowrite16be(master->gpio_tra_dreg, base + HDR_TRANS_GPIO_DADDR);
+ iowrite8(master->gpio_clk_bit, base + HDR_CLOCK_GPIO_BIT);
+ iowrite8(master->gpio_dat_bit, base + HDR_DATA_GPIO_BIT);
+ iowrite8(master->gpio_tra_bit, base + HDR_TRANS_GPIO_BIT);
+}
+
+static void setup_ast2500_fw_config(struct fsi_master_acf *master)
+{
+ void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+ setup_common_fw_config(master, base);
+ iowrite32be(FW_CONTROL_USE_STOP, base + HDR_FW_CONTROL);
+}
+
+static void setup_ast2400_fw_config(struct fsi_master_acf *master)
+{
+ void __iomem *base = master->cf_mem + HDR_OFFSET;
+
+ setup_common_fw_config(master, base);
+ iowrite32be(FW_CONTROL_CONT_CLOCK|FW_CONTROL_DUMMY_RD, base + HDR_FW_CONTROL);
+}
+
+static int setup_gpios_for_copro(struct fsi_master_acf *master)
+{
+	int rc;
+
+	/* These aren't under ColdFire control, just set them up appropriately */
+ gpiod_direction_output(master->gpio_mux, 1);
+ gpiod_direction_output(master->gpio_enable, 1);
+
+ /* Those are under ColdFire control, let it configure them */
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_clk, &master->gpio_clk_vreg,
+ &master->gpio_clk_dreg, &master->gpio_clk_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign clock gpio to coprocessor\n");
+ return rc;
+ }
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_data, &master->gpio_dat_vreg,
+ &master->gpio_dat_dreg, &master->gpio_dat_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign data gpio to coprocessor\n");
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ return rc;
+ }
+ rc = aspeed_gpio_copro_grab_gpio(master->gpio_trans, &master->gpio_tra_vreg,
+ &master->gpio_tra_dreg, &master->gpio_tra_bit);
+ if (rc) {
+ dev_err(master->dev, "failed to assign trans gpio to coprocessor\n");
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ aspeed_gpio_copro_release_gpio(master->gpio_data);
+ return rc;
+ }
+ return 0;
+}
+
+static void release_copro_gpios(struct fsi_master_acf *master)
+{
+ aspeed_gpio_copro_release_gpio(master->gpio_clk);
+ aspeed_gpio_copro_release_gpio(master->gpio_data);
+ aspeed_gpio_copro_release_gpio(master->gpio_trans);
+}
+
+static int load_copro_firmware(struct fsi_master_acf *master)
+{
+ const struct firmware *fw;
+ uint16_t sig = 0, wanted_sig;
+ const u8 *data;
+ size_t size = 0;
+ int rc;
+
+ /* Get the binary */
+ rc = request_firmware(&fw, FW_FILE_NAME, master->dev);
+ if (rc) {
+		dev_err(master->dev, "Error %d loading firmware '%s'\n",
+			rc, FW_FILE_NAME);
+ return rc;
+ }
+
+	/* Which image do we want? (shared vs. split clock/data GPIOs) */
+ if (master->gpio_clk_vreg == master->gpio_dat_vreg)
+ wanted_sig = SYS_SIG_SHARED;
+ else
+ wanted_sig = SYS_SIG_SPLIT;
+ dev_dbg(master->dev, "Looking for image sig %04x\n", wanted_sig);
+
+ /* Try to find it */
+ for (data = fw->data; data < (fw->data + fw->size);) {
+ sig = be16_to_cpup((__be16 *)(data + HDR_OFFSET + HDR_SYS_SIG));
+ size = be32_to_cpup((__be32 *)(data + HDR_OFFSET + HDR_FW_SIZE));
+ if (sig == wanted_sig)
+ break;
+ data += size;
+ }
+ if (sig != wanted_sig) {
+ dev_err(master->dev, "Failed to locate image sig %04x in FW blob\n",
+ wanted_sig);
+ rc = -ENODEV;
+ goto release_fw;
+ }
+ if (size > master->cf_mem_size) {
+ dev_err(master->dev, "FW size (%zd) bigger than memory reserve (%zd)\n",
+			size, master->cf_mem_size);
+ rc = -ENOMEM;
+ } else {
+ memcpy_toio(master->cf_mem, data, size);
+ }
+
+release_fw:
+ release_firmware(fw);
+ return rc;
+}
+
+static int check_firmware_image(struct fsi_master_acf *master)
+{
+ uint32_t fw_vers, fw_api, fw_options;
+
+ fw_vers = ioread16be(master->cf_mem + HDR_OFFSET + HDR_FW_VERS);
+ fw_api = ioread16be(master->cf_mem + HDR_OFFSET + HDR_API_VERS);
+ fw_options = ioread32be(master->cf_mem + HDR_OFFSET + HDR_FW_OPTIONS);
+ master->trace_enabled = !!(fw_options & FW_OPTION_TRACE_EN);
+
+ /* Check version and signature */
+ dev_info(master->dev, "ColdFire initialized, firmware v%d API v%d.%d (trace %s)\n",
+ fw_vers, fw_api >> 8, fw_api & 0xff,
+ master->trace_enabled ? "enabled" : "disabled");
+
+ if ((fw_api >> 8) != API_VERSION_MAJ) {
+ dev_err(master->dev, "Unsupported coprocessor API version !\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int copro_enable_sw_irq(struct fsi_master_acf *master)
+{
+ int timeout;
+ uint32_t val;
+
+ /*
+ * Enable coprocessor interrupt input. I've had problems getting the
+ * value to stick, so try in a loop
+ */
+ for (timeout = 0; timeout < 10; timeout++) {
+ iowrite32(0x2, master->cvic + CVIC_EN_REG);
+ val = ioread32(master->cvic + CVIC_EN_REG);
+ if (val & 2)
+ break;
+ msleep(1);
+ }
+ if (!(val & 2)) {
+ dev_err(master->dev, "Failed to enable coprocessor interrupt !\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int fsi_master_acf_setup(struct fsi_master_acf *master)
+{
+ int timeout, rc;
+ uint32_t val;
+
+ /* Make sure the ColdFire is stopped */
+ reset_cf(master);
+
+ /*
+ * Clear SRAM. This needs to happen before we setup the GPIOs
+ * as we might start trying to arbitrate as soon as that happens.
+ */
+ memset_io(master->sram, 0, SRAM_SIZE);
+
+ /* Configure GPIOs */
+ rc = setup_gpios_for_copro(master);
+ if (rc)
+ return rc;
+
+ /* Load the firmware into the reserved memory */
+ rc = load_copro_firmware(master);
+ if (rc)
+ return rc;
+
+ /* Read signature and check versions */
+ rc = check_firmware_image(master);
+ if (rc)
+ return rc;
+
+ /* Setup coldfire memory map */
+ if (master->is_ast2500) {
+ setup_ast2500_cf_maps(master);
+ setup_ast2500_fw_config(master);
+ } else {
+ setup_ast2400_cf_maps(master);
+ setup_ast2400_fw_config(master);
+ }
+
+ /* Start the ColdFire */
+ start_cf(master);
+
+ /* Wait for status register to indicate command completion
+ * which signals the initialization is complete
+ */
+ for (timeout = 0; timeout < 10; timeout++) {
+ val = ioread8(master->sram + CF_STARTED);
+ if (val)
+ break;
+ msleep(1);
+ }
+ if (!val) {
+ dev_err(master->dev, "Coprocessor startup timeout !\n");
+ rc = -ENODEV;
+ goto err;
+ }
+
+ /* Configure echo & send delay */
+ iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+ iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+
+ /* Enable SW interrupt to copro if any */
+ if (master->cvic) {
+ rc = copro_enable_sw_irq(master);
+ if (rc)
+ goto err;
+ }
+ return 0;
+ err:
+ /* An error occurred, don't leave the coprocessor running */
+ reset_cf(master);
+
+ /* Release the GPIOs */
+ release_copro_gpios(master);
+
+ return rc;
+}
+
+
+static void fsi_master_acf_terminate(struct fsi_master_acf *master)
+{
+ unsigned long flags;
+
+ /*
+	 * A GPIO arbitration request could come in while this is
+ * happening. To avoid problems, we disable interrupts so it
+ * cannot preempt us on this CPU
+ */
+
+ local_irq_save(flags);
+
+ /* Stop the coprocessor */
+ reset_cf(master);
+
+ /* We mark the copro not-started */
+ iowrite32(0, master->sram + CF_STARTED);
+
+ /* We mark the ARB register as having given up arbitration to
+ * deal with a potential race with the arbitration request
+ */
+ iowrite8(ARB_ARM_ACK, master->sram + ARB_REG);
+
+ local_irq_restore(flags);
+
+ /* Return the GPIOs to the ARM */
+ release_copro_gpios(master);
+}
+
+static void fsi_master_acf_setup_external(struct fsi_master_acf *master)
+{
+ /* Setup GPIOs for external FSI master (FSP box) */
+ gpiod_direction_output(master->gpio_mux, 0);
+ gpiod_direction_output(master->gpio_trans, 0);
+ gpiod_direction_output(master->gpio_enable, 1);
+ gpiod_direction_input(master->gpio_clk);
+ gpiod_direction_input(master->gpio_data);
+}
+
+static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+ int rc = -EBUSY;
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ if (!master->external_mode) {
+ gpiod_set_value(master->gpio_enable, 1);
+ rc = 0;
+ }
+ mutex_unlock(&master->lock);
+
+ return rc;
+}
+
+static int fsi_master_acf_link_config(struct fsi_master *_master, int link,
+ u8 t_send_delay, u8 t_echo_delay)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(_master);
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->lock);
+ master->t_send_delay = t_send_delay;
+ master->t_echo_delay = t_echo_delay;
+ dev_dbg(master->dev, "Changing delays: send=%d echo=%d\n",
+ t_send_delay, t_echo_delay);
+ iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
+ iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
+ mutex_unlock(&master->lock);
+
+ return 0;
+}
+
+static ssize_t external_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsi_master_acf *master = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%u\n",
+ master->external_mode ? 1 : 0);
+}
+
+static ssize_t external_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fsi_master_acf *master = dev_get_drvdata(dev);
+ unsigned long val;
+ bool external_mode;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ external_mode = !!val;
+
+ mutex_lock(&master->lock);
+
+ if (external_mode == master->external_mode) {
+ mutex_unlock(&master->lock);
+ return count;
+ }
+
+ master->external_mode = external_mode;
+ if (master->external_mode) {
+ fsi_master_acf_terminate(master);
+ fsi_master_acf_setup_external(master);
+ } else
+ fsi_master_acf_setup(master);
+
+ mutex_unlock(&master->lock);
+
+ fsi_master_rescan(&master->master);
+
+ return count;
+}
+
+static DEVICE_ATTR(external_mode, 0664,
+ external_mode_show, external_mode_store);
+
+static int fsi_master_acf_gpio_request(void *data)
+{
+ struct fsi_master_acf *master = data;
+ int timeout;
+ u8 val;
+
+	/* Note: this doesn't require holding our mutex */
+
+	/* Write request */
+ iowrite8(ARB_ARM_REQ, master->sram + ARB_REG);
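+	/* the copro acks by changing ARB_REG from ARB_ARM_REQ to ARB_ARM_ACK */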
+
+ /*
+ * There is a race (which does happen at boot time) when we get an
+ * arbitration request as we are either about to or just starting
+ * the coprocessor.
+ *
+ * To handle it, we first check if we are running. If not yet we
+ * check whether the copro is started in the SCU.
+ *
+ * If it's not started, we can basically just assume we have arbitration
+	 * and return. Otherwise, we wait normally, expecting the arbitration
+ * to eventually complete.
+ */
+ if (ioread32(master->sram + CF_STARTED) == 0) {
+ unsigned int reg = 0;
+
+ regmap_read(master->scu, SCU_COPRO_CTRL, &reg);
+ if (!(reg & SCU_COPRO_CLK_EN))
+ return 0;
+ }
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ for (timeout = 0; timeout < 10000; timeout++) {
+ val = ioread8(master->sram + ARB_REG);
+ if (val != ARB_ARM_REQ)
+ break;
+ udelay(1);
+ }
+
+ /* If it failed, override anyway */
+ if (val != ARB_ARM_ACK)
+ dev_warn(master->dev, "GPIO request arbitration timeout\n");
+
+ return 0;
+}
+
+static int fsi_master_acf_gpio_release(void *data)
+{
+ struct fsi_master_acf *master = data;
+
+ /* Write release */
+ iowrite8(0, master->sram + ARB_REG);
+
+ /* Ring doorbell if any */
+ if (master->cvic)
+ iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
+
+ return 0;
+}
+
+static void fsi_master_acf_release(struct device *dev)
+{
+ struct fsi_master_acf *master = to_fsi_master_acf(dev_to_fsi_master(dev));
+
+ /* Cleanup, stop coprocessor */
+ mutex_lock(&master->lock);
+ fsi_master_acf_terminate(master);
+ aspeed_gpio_copro_set_ops(NULL, NULL);
+ mutex_unlock(&master->lock);
+
+ /* Free resources */
+ gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+ of_node_put(dev_of_node(master->dev));
+
+ kfree(master);
+}
+
+static const struct aspeed_gpio_copro_ops fsi_master_acf_gpio_ops = {
+ .request_access = fsi_master_acf_gpio_request,
+ .release_access = fsi_master_acf_gpio_release,
+};
+
+static int fsi_master_acf_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *mnode = dev_of_node(&pdev->dev);
+ struct genpool_data_fixed gpdf;
+ struct fsi_master_acf *master;
+ struct gpio_desc *gpio;
+ struct resource res;
+ uint32_t cf_mem_align;
+ int rc;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = &pdev->dev;
+ master->master.dev.parent = master->dev;
+ master->last_addr = LAST_ADDR_INVALID;
+
+ /* AST2400 vs. AST2500 */
+ master->is_ast2500 = of_device_is_compatible(mnode, "aspeed,ast2500-cf-fsi-master");
+
+ /* Grab the SCU, we'll need to access it to configure the coprocessor */
+ if (master->is_ast2500)
+ master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ else
+ master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2400-scu");
+ if (IS_ERR(master->scu)) {
+ dev_err(&pdev->dev, "failed to find SCU regmap\n");
+ rc = PTR_ERR(master->scu);
+ goto err_free;
+ }
+
+ /* Grab all the GPIOs we need */
+ gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get clock gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_clk = gpio;
+
+ gpio = devm_gpiod_get(&pdev->dev, "data", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get data gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_data = gpio;
+
+ /* Optional GPIOs */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get trans gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_trans = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get enable gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_enable = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to get mux gpio\n");
+ rc = PTR_ERR(gpio);
+ goto err_free;
+ }
+ master->gpio_mux = gpio;
+
+	/* Grab the reserved memory region (use DMA API instead?) */
+ np = of_parse_phandle(mnode, "memory-region", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Didn't find reserved memory\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ rc = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (rc) {
+		dev_err(&pdev->dev, "Couldn't convert reserved memory region to a resource\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ master->cf_mem_size = resource_size(&res);
+ master->cf_mem_addr = (uint32_t)res.start;
+ cf_mem_align = master->is_ast2500 ? 0x00100000 : 0x00200000;
+ if (master->cf_mem_addr & (cf_mem_align - 1)) {
+ dev_err(&pdev->dev, "Reserved memory has insufficient alignment\n");
+ rc = -ENOMEM;
+ goto err_free;
+ }
+ master->cf_mem = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(master->cf_mem)) {
+ rc = PTR_ERR(master->cf_mem);
+ dev_err(&pdev->dev, "Error %d mapping coldfire memory\n", rc);
+ goto err_free;
+ }
+ dev_dbg(&pdev->dev, "DRAM allocation @%x\n", master->cf_mem_addr);
+
+ /* AST2500 has a SW interrupt to the coprocessor */
+ if (master->is_ast2500) {
+ /* Grab the CVIC (ColdFire interrupts controller) */
+ np = of_parse_phandle(mnode, "aspeed,cvic", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Didn't find CVIC\n");
+ rc = -EINVAL;
+ goto err_free;
+ }
+ master->cvic = devm_of_iomap(&pdev->dev, np, 0, NULL);
+ if (IS_ERR(master->cvic)) {
+ rc = PTR_ERR(master->cvic);
+ dev_err(&pdev->dev, "Error %d mapping CVIC\n", rc);
+ goto err_free;
+ }
+ rc = of_property_read_u32(np, "copro-sw-interrupts",
+ &master->cvic_sw_irq);
+ if (rc) {
+ dev_err(&pdev->dev, "Can't find coprocessor SW interrupt\n");
+ goto err_free;
+ }
+ }
+
+ /* Grab the SRAM */
+ master->sram_pool = of_gen_pool_get(dev_of_node(&pdev->dev), "aspeed,sram", 0);
+ if (!master->sram_pool) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "Can't find sram pool\n");
+ goto err_free;
+ }
+
+ /* Current microcode only deals with fixed location in SRAM */
+ gpdf.offset = 0;
+ master->sram = (void __iomem *)gen_pool_alloc_algo(master->sram_pool, SRAM_SIZE,
+ gen_pool_fixed_alloc, &gpdf);
+ if (!master->sram) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to allocate sram from pool\n");
+ goto err_free;
+ }
+ dev_dbg(&pdev->dev, "SRAM allocation @%lx\n",
+ (unsigned long)gen_pool_virt_to_phys(master->sram_pool,
+ (unsigned long)master->sram));
+
+ /*
+	 * Hook up with the GPIO driver for arbitration of GPIO bank
+	 * ownership.
+ */
+ aspeed_gpio_copro_set_ops(&fsi_master_acf_gpio_ops, master);
+
+ /* Default FSI command delays */
+ master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+ master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+ master->master.n_links = 1;
+ if (master->is_ast2500)
+ master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
+ master->master.read = fsi_master_acf_read;
+ master->master.write = fsi_master_acf_write;
+ master->master.term = fsi_master_acf_term;
+ master->master.send_break = fsi_master_acf_break;
+ master->master.link_enable = fsi_master_acf_link_enable;
+ master->master.link_config = fsi_master_acf_link_config;
+ master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+ master->master.dev.release = fsi_master_acf_release;
+ platform_set_drvdata(pdev, master);
+ mutex_init(&master->lock);
+
+ mutex_lock(&master->lock);
+ rc = fsi_master_acf_setup(master);
+ mutex_unlock(&master->lock);
+ if (rc)
+ goto release_of_dev;
+
+ rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
+ if (rc)
+ goto stop_copro;
+
+ rc = fsi_master_register(&master->master);
+ if (!rc)
+ return 0;
+
+ device_remove_file(master->dev, &dev_attr_external_mode);
+ put_device(&master->master.dev);
+ return rc;
+
+ stop_copro:
+ fsi_master_acf_terminate(master);
+ release_of_dev:
+ aspeed_gpio_copro_set_ops(NULL, NULL);
+ gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
+ of_node_put(dev_of_node(master->dev));
+ err_free:
+ kfree(master);
+ return rc;
+}
+
+
+static int fsi_master_acf_remove(struct platform_device *pdev)
+{
+ struct fsi_master_acf *master = platform_get_drvdata(pdev);
+
+ device_remove_file(master->dev, &dev_attr_external_mode);
+
+ fsi_master_unregister(&master->master);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_acf_match[] = {
+ { .compatible = "aspeed,ast2400-cf-fsi-master" },
+ { .compatible = "aspeed,ast2500-cf-fsi-master" },
+ { },
+};
+
+static struct platform_driver fsi_master_acf = {
+ .driver = {
+ .name = "fsi-master-acf",
+ .of_match_table = fsi_master_acf_match,
+ },
+ .probe = fsi_master_acf_probe,
+ .remove = fsi_master_acf_remove,
+};
+
+module_platform_driver(fsi_master_acf);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index 3f487449a277..4eb3a766fd4a 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -8,59 +8,31 @@
#include <linux/fsi.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
+#include <linux/irqflags.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include "fsi-master.h"
#define FSI_GPIO_STD_DLY 1 /* Standard pin delay in nS */
-#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
-#define FSI_PRE_BREAK_CLOCKS 50 /* Number clocks to prep for break */
-#define FSI_BREAK_CLOCKS 256 /* Number of clocks to issue break */
-#define FSI_POST_BREAK_CLOCKS 16000 /* Number clocks to set up cfam */
-#define FSI_INIT_CLOCKS 5000 /* Clock out any old data */
-#define FSI_GPIO_STD_DELAY 10 /* Standard GPIO delay in nS */
- /* todo: adjust down as low as */
- /* possible or eliminate */
-#define FSI_GPIO_CMD_DPOLL 0x2
-#define FSI_GPIO_CMD_TERM 0x3f
-#define FSI_GPIO_CMD_ABS_AR 0x4
-
-#define FSI_GPIO_DPOLL_CLOCKS 100 /* < 21 will cause slave to hang */
-
-/* Bus errors */
-#define FSI_GPIO_ERR_BUSY 1 /* Slave stuck in busy state */
-#define FSI_GPIO_RESP_ERRA 2 /* Any (misc) Error */
-#define FSI_GPIO_RESP_ERRC 3 /* Slave reports master CRC error */
-#define FSI_GPIO_MTOE 4 /* Master time out error */
-#define FSI_GPIO_CRC_INVAL 5 /* Master reports slave CRC error */
-
-/* Normal slave responses */
-#define FSI_GPIO_RESP_BUSY 1
-#define FSI_GPIO_RESP_ACK 0
-#define FSI_GPIO_RESP_ACKD 4
-
-#define FSI_GPIO_MAX_BUSY 100
-#define FSI_GPIO_MTOE_COUNT 1000
-#define FSI_GPIO_DRAIN_BITS 20
-#define FSI_GPIO_CRC_SIZE 4
-#define FSI_GPIO_MSG_ID_SIZE 2
-#define FSI_GPIO_MSG_RESPID_SIZE 2
-#define FSI_GPIO_PRIME_SLAVE_CLOCKS 100
+#define LAST_ADDR_INVALID 0x1
struct fsi_master_gpio {
struct fsi_master master;
struct device *dev;
- spinlock_t cmd_lock; /* Lock for commands */
+ struct mutex cmd_lock; /* mutex for command ordering */
struct gpio_desc *gpio_clk;
struct gpio_desc *gpio_data;
struct gpio_desc *gpio_trans; /* Voltage translator */
struct gpio_desc *gpio_enable; /* FSI enable */
struct gpio_desc *gpio_mux; /* Mux control */
bool external_mode;
+ bool no_delays;
+ uint32_t last_addr;
+ uint8_t t_send_delay;
+ uint8_t t_echo_delay;
};
#define CREATE_TRACE_POINTS
@@ -78,19 +50,31 @@ static void clock_toggle(struct fsi_master_gpio *master, int count)
int i;
for (i = 0; i < count; i++) {
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 0);
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 1);
}
}
-static int sda_in(struct fsi_master_gpio *master)
+static int sda_clock_in(struct fsi_master_gpio *master)
{
int in;
- ndelay(FSI_GPIO_STD_DLY);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 0);
+
+ /* Dummy read to feed the synchronizers */
+ gpiod_get_value(master->gpio_data);
+
+ /* Actual data read */
in = gpiod_get_value(master->gpio_data);
+ if (!master->no_delays)
+ ndelay(FSI_GPIO_STD_DLY);
+ gpiod_set_value(master->gpio_clk, 1);
return in ? 1 : 0;
}
@@ -113,10 +97,17 @@ static void set_sda_output(struct fsi_master_gpio *master, int value)
static void clock_zeros(struct fsi_master_gpio *master, int count)
{
+ trace_fsi_master_gpio_clock_zeros(master, count);
set_sda_output(master, 1);
clock_toggle(master, count);
}
+static void echo_delay(struct fsi_master_gpio *master)
+{
+ clock_zeros(master, master->t_echo_delay);
+}
+
static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
uint8_t num_bits)
{
@@ -125,8 +116,7 @@ static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
set_sda_input(master);
for (bit = 0; bit < num_bits; bit++) {
- clock_toggle(master, 1);
- in_bit = sda_in(master);
+ in_bit = sda_clock_in(master);
msg->msg <<= 1;
msg->msg |= ~in_bit & 0x1; /* Data is active low */
}
@@ -191,22 +181,92 @@ static void msg_push_crc(struct fsi_gpio_msg *msg)
msg_push_bits(msg, crc, 4);
}
+static bool check_same_address(struct fsi_master_gpio *master, int id,
+ uint32_t addr)
+{
+ /* this will also handle LAST_ADDR_INVALID */
+ return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
+}
+
+static bool check_relative_address(struct fsi_master_gpio *master, int id,
+ uint32_t addr, uint32_t *rel_addrp)
+{
+ uint32_t last_addr = master->last_addr;
+ int32_t rel_addr;
+
+ if (last_addr == LAST_ADDR_INVALID)
+ return false;
+
+ /* We may be in 23-bit addressing mode, which uses the id as the
+ * top two address bits. So, if we're referencing a different ID,
+ * use absolute addresses.
+ */
+ if (((last_addr >> 21) & 0x3) != id)
+ return false;
+
+ /* remove the top two bits from any 23-bit addressing */
+ last_addr &= (1 << 21) - 1;
+
+ /* We know that the addresses are limited to 21 bits, so this won't
+ * overflow the signed rel_addr */
+ rel_addr = addr - last_addr;
+ if (rel_addr > 255 || rel_addr < -256)
+ return false;
+
+ *rel_addrp = (uint32_t)rel_addr;
+
+ return true;
+}
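Worked example of the windowing above (addresses illustrative): with last_addr recording id 0 at 0x1000, a follow-up access to 0x1044 gives rel_addr = +0x44, inside the signed 9-bit window [-256, 255], so a relative command is used; an access to 0x2000 gives rel_addr = +0x1000, outside the window, falling back to an absolute command; and accesses to 0x1000-0x1003 hit the same word and qualify for the same-address form.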
+
+static void last_address_update(struct fsi_master_gpio *master,
+ int id, bool valid, uint32_t addr)
+{
+ if (!valid)
+ master->last_addr = LAST_ADDR_INVALID;
+ else
+ master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
+}
+
/*
- * Encode an Absolute Address command
+ * Encode an Absolute/Relative/Same Address command
*/
-static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
- uint8_t id, uint32_t addr, size_t size, const void *data)
+static void build_ar_command(struct fsi_master_gpio *master,
+ struct fsi_gpio_msg *cmd, uint8_t id,
+ uint32_t addr, size_t size, const void *data)
{
+ int i, addr_bits, opcode_bits;
bool write = !!data;
- uint8_t ds;
- int i;
+ uint8_t ds, opcode;
+ uint32_t rel_addr;
cmd->bits = 0;
cmd->msg = 0;
- msg_push_bits(cmd, id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_ABS_AR, 3);
- msg_push_bits(cmd, write ? 0 : 1, 1);
+ /* we have 21 bits of address max */
+ addr &= ((1 << 21) - 1);
+
+ /* cmd opcodes are variable length - SAME_AR is only two bits */
+ opcode_bits = 3;
+
+ if (check_same_address(master, id, addr)) {
+ /* we still address the byte offset within the word */
+ addr_bits = 2;
+ opcode_bits = 2;
+ opcode = FSI_CMD_SAME_AR;
+ trace_fsi_master_gpio_cmd_same_addr(master);
+
+ } else if (check_relative_address(master, id, addr, &rel_addr)) {
+ /* 8 bits plus sign */
+ addr_bits = 9;
+ addr = rel_addr;
+ opcode = FSI_CMD_REL_AR;
+ trace_fsi_master_gpio_cmd_rel_addr(master, rel_addr);
+
+ } else {
+ addr_bits = 21;
+ opcode = FSI_CMD_ABS_AR;
+ trace_fsi_master_gpio_cmd_abs_addr(master, addr);
+ }
/*
* The read/write size is encoded in the lower bits of the address
@@ -223,7 +283,10 @@ static void build_abs_ar_command(struct fsi_gpio_msg *cmd,
if (size == 4)
addr |= 1;
- msg_push_bits(cmd, addr & ((1 << 21) - 1), 21);
+ msg_push_bits(cmd, id, 2);
+ msg_push_bits(cmd, opcode, opcode_bits);
+ msg_push_bits(cmd, write ? 0 : 1, 1);
+ msg_push_bits(cmd, addr, addr_bits);
msg_push_bits(cmd, ds, 1);
for (i = 0; write && i < size; i++)
msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
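Counting the msg_push_bits() calls shows what the shorter opcodes buy: an absolute transfer spends id(2) + opcode(3) + r/w(1) + address(21) + ds(1) = 28 header bits, a relative one 2 + 3 + 1 + 9 + 1 = 16, and a same-address one only 2 + 2 + 1 + 2 + 1 = 8, before the data bytes and 4-bit CRC in every case.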
@@ -237,14 +300,18 @@ static void build_dpoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_DPOLL, 3);
+ msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
msg_push_crc(cmd);
}
-static void echo_delay(struct fsi_master_gpio *master)
+static void build_epoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
{
- set_sda_output(master, 1);
- clock_toggle(master, FSI_ECHO_DELAY_CLOCKS);
+ cmd->bits = 0;
+ cmd->msg = 0;
+
+ msg_push_bits(cmd, slave_id, 2);
+ msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
+ msg_push_crc(cmd);
}
static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
@@ -253,40 +320,40 @@ static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
- msg_push_bits(cmd, FSI_GPIO_CMD_TERM, 6);
+ msg_push_bits(cmd, FSI_CMD_TERM, 6);
msg_push_crc(cmd);
}
/*
- * Store information on master errors so handler can detect and clean
- * up the bus
+ * Note: callers rely specifically on this returning -EAGAIN for
+ * a CRC error detected in the response. Use other error codes
+ * for other situations. It will be converted to something else
+ * higher up the stack before it reaches userspace.
*/
-static void fsi_master_gpio_error(struct fsi_master_gpio *master, int error)
-{
-
-}
-
static int read_one_response(struct fsi_master_gpio *master,
uint8_t data_size, struct fsi_gpio_msg *msgp, uint8_t *tagp)
{
struct fsi_gpio_msg msg;
- uint8_t id, tag;
+ unsigned long flags;
uint32_t crc;
+ uint8_t tag;
int i;
+ local_irq_save(flags);
+
/* wait for the start bit */
- for (i = 0; i < FSI_GPIO_MTOE_COUNT; i++) {
+ for (i = 0; i < FSI_MASTER_MTOE_COUNT; i++) {
msg.bits = 0;
msg.msg = 0;
serial_in(master, &msg, 1);
if (msg.msg)
break;
}
- if (i == FSI_GPIO_MTOE_COUNT) {
+ if (i == FSI_MASTER_MTOE_COUNT) {
dev_dbg(master->dev,
"Master time out waiting for response\n");
- fsi_master_gpio_error(master, FSI_GPIO_MTOE);
- return -EIO;
+ local_irq_restore(flags);
+ return -ETIMEDOUT;
}
msg.bits = 0;
@@ -295,23 +362,27 @@ static int read_one_response(struct fsi_master_gpio *master,
/* Read slave ID & response tag */
serial_in(master, &msg, 4);
- id = (msg.msg >> FSI_GPIO_MSG_RESPID_SIZE) & 0x3;
tag = msg.msg & 0x3;
/* If we have an ACK and we're expecting data, clock the data in too */
- if (tag == FSI_GPIO_RESP_ACK && data_size)
+ if (tag == FSI_RESP_ACK && data_size)
serial_in(master, &msg, data_size * 8);
/* read CRC */
- serial_in(master, &msg, FSI_GPIO_CRC_SIZE);
+ serial_in(master, &msg, FSI_CRC_SIZE);
+
+ local_irq_restore(flags);
/* we have a whole message now; check CRC */
crc = crc4(0, 1, 1);
crc = crc4(crc, msg.msg, msg.bits);
if (crc) {
- dev_dbg(master->dev, "ERR response CRC\n");
- fsi_master_gpio_error(master, FSI_GPIO_CRC_INVAL);
- return -EIO;
+ /* Check if it's all 1's, that probably means the host is off */
+ if (((~msg.msg) & ((1ull << msg.bits) - 1)) == 0)
+ return -ENODEV;
+ dev_dbg(master->dev, "ERR response CRC msg: 0x%016llx (%d bits)\n",
+ msg.msg, msg.bits);
+ return -EAGAIN;
}
if (msgp)
@@ -325,19 +396,23 @@ static int read_one_response(struct fsi_master_gpio *master,
static int issue_term(struct fsi_master_gpio *master, uint8_t slave)
{
struct fsi_gpio_msg cmd;
+ unsigned long flags;
uint8_t tag;
int rc;
build_term_command(&cmd, slave);
+
+ local_irq_save(flags);
serial_out(master, &cmd);
echo_delay(master);
+ local_irq_restore(flags);
rc = read_one_response(master, 0, NULL, &tag);
if (rc < 0) {
dev_err(master->dev,
"TERM failed; lost communication with slave\n");
return -EIO;
- } else if (tag != FSI_GPIO_RESP_ACK) {
+ } else if (tag != FSI_RESP_ACK) {
dev_err(master->dev, "TERM failed; response %d\n", tag);
return -EIO;
}
@@ -350,16 +425,39 @@ static int poll_for_response(struct fsi_master_gpio *master,
{
struct fsi_gpio_msg response, cmd;
int busy_count = 0, rc, i;
+ unsigned long flags;
uint8_t tag;
uint8_t *data_byte = data;
-
+ int crc_err_retries = 0;
retry:
rc = read_one_response(master, size, &response, &tag);
- if (rc)
- return rc;
+
+ /* Handle retries on CRC errors */
+ if (rc == -EAGAIN) {
+ /* Too many retries ? */
+ if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
+ /*
+ * Pass it up as -EIO; otherwise the upper level will retry
+ * the whole command, which isn't what we want here.
+ */
+ rc = -EIO;
+ goto fail;
+ }
+ dev_dbg(master->dev,
+ "CRC error retry %d\n", crc_err_retries);
+ trace_fsi_master_gpio_crc_rsp_error(master);
+ build_epoll_command(&cmd, slave);
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
+ serial_out(master, &cmd);
+ echo_delay(master);
+ local_irq_restore(flags);
+ goto retry;
+ } else if (rc)
+ goto fail;
switch (tag) {
- case FSI_GPIO_RESP_ACK:
+ case FSI_RESP_ACK:
if (size && data) {
uint64_t val = response.msg;
/* clear crc & mask */
@@ -372,57 +470,89 @@ retry:
}
}
break;
- case FSI_GPIO_RESP_BUSY:
+ case FSI_RESP_BUSY:
/*
* It's necessary to clock the slave before issuing
* d-poll, not indicated in the hardware protocol
* spec. < 20 clocks causes slave to hang, 21 ok.
*/
- clock_zeros(master, FSI_GPIO_DPOLL_CLOCKS);
- if (busy_count++ < FSI_GPIO_MAX_BUSY) {
+ if (busy_count++ < FSI_MASTER_MAX_BUSY) {
build_dpoll_command(&cmd, slave);
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
serial_out(master, &cmd);
echo_delay(master);
+ local_irq_restore(flags);
goto retry;
}
dev_warn(master->dev,
"ERR slave is stuck in busy state, issuing TERM\n");
+ local_irq_save(flags);
+ clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
+ local_irq_restore(flags);
issue_term(master, slave);
rc = -EIO;
break;
- case FSI_GPIO_RESP_ERRA:
- case FSI_GPIO_RESP_ERRC:
- dev_dbg(master->dev, "ERR%c received: 0x%x\n",
- tag == FSI_GPIO_RESP_ERRA ? 'A' : 'C',
- (int)response.msg);
- fsi_master_gpio_error(master, response.msg);
+ case FSI_RESP_ERRA:
+ dev_dbg(master->dev, "ERRA received: 0x%x\n", (int)response.msg);
rc = -EIO;
break;
+ case FSI_RESP_ERRC:
+ dev_dbg(master->dev, "ERRC received: 0x%x\n", (int)response.msg);
+ trace_fsi_master_gpio_crc_cmd_error(master);
+ rc = -EAGAIN;
+ break;
}
- /* Clock the slave enough to be ready for next operation */
- clock_zeros(master, FSI_GPIO_PRIME_SLAVE_CLOCKS);
+ if (busy_count > 0)
+ trace_fsi_master_gpio_poll_response_busy(master, busy_count);
+ fail:
+ /*
+ * Clock out tSendDelay cycles; this avoids signal reflections when
+ * switching from receiving a response back to sending data.
+ */
+ local_irq_save(flags);
+ clock_zeros(master, master->t_send_delay);
+ local_irq_restore(flags);
+
return rc;
}
-static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
- struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+static int send_request(struct fsi_master_gpio *master,
+ struct fsi_gpio_msg *cmd)
{
unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&master->cmd_lock, flags);
- if (master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ if (master->external_mode)
return -EBUSY;
- }
+ local_irq_save(flags);
serial_out(master, cmd);
echo_delay(master);
- rc = poll_for_response(master, slave, resp_len, resp);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
+ struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
+{
+ int rc = -EAGAIN, retries = 0;
+
+ while ((retries++) < FSI_CRC_ERR_RETRIES) {
+ rc = send_request(master, cmd);
+ if (rc)
+ break;
+ rc = poll_for_response(master, slave, resp_len, resp);
+ if (rc != -EAGAIN)
+ break;
+ rc = -EIO;
+ dev_warn(master->dev, "ECRC retry %d\n", retries);
+
+ /* Pace it a bit before retry */
+ msleep(1);
+ }
return rc;
}
@@ -432,12 +562,18 @@ static int fsi_master_gpio_read(struct fsi_master *_master, int link,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
- build_abs_ar_command(&cmd, id, addr, size, NULL);
- return fsi_master_gpio_xfer(master, id, &cmd, size, val);
+ mutex_lock(&master->cmd_lock);
+ build_ar_command(master, &cmd, id, addr, size, NULL);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, size, val);
+ last_address_update(master, id, rc == 0, addr);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_write(struct fsi_master *_master, int link,
@@ -445,12 +581,18 @@ static int fsi_master_gpio_write(struct fsi_master *_master, int link,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
- build_abs_ar_command(&cmd, id, addr, size, val);
- return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ mutex_lock(&master->cmd_lock);
+ build_ar_command(master, &cmd, id, addr, size, val);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, rc == 0, addr);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_term(struct fsi_master *_master,
@@ -458,12 +600,18 @@ static int fsi_master_gpio_term(struct fsi_master *_master,
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
+ int rc;
if (link != 0)
return -ENODEV;
+ mutex_lock(&master->cmd_lock);
build_term_command(&cmd, id);
- return fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
+ last_address_update(master, id, false, 0);
+ mutex_unlock(&master->cmd_lock);
+
+ return rc;
}
static int fsi_master_gpio_break(struct fsi_master *_master, int link)
@@ -476,11 +624,14 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
trace_fsi_master_gpio_break(master);
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return -EBUSY;
}
+
+ local_irq_save(flags);
+
set_sda_output(master, 1);
sda_out(master, 1);
clock_toggle(master, FSI_PRE_BREAK_CLOCKS);
@@ -489,7 +640,11 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
echo_delay(master);
sda_out(master, 1);
clock_toggle(master, FSI_POST_BREAK_CLOCKS);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+ local_irq_restore(flags);
+
+ last_address_update(master, 0, false, 0);
+ mutex_unlock(&master->cmd_lock);
/* Wait for logic reset to take effect */
udelay(200);
@@ -499,6 +654,8 @@ static int fsi_master_gpio_break(struct fsi_master *_master, int link)
static void fsi_master_gpio_init(struct fsi_master_gpio *master)
{
+ unsigned long flags;
+
gpiod_direction_output(master->gpio_mux, 1);
gpiod_direction_output(master->gpio_trans, 1);
gpiod_direction_output(master->gpio_enable, 1);
@@ -506,7 +663,9 @@ static void fsi_master_gpio_init(struct fsi_master_gpio *master)
gpiod_direction_output(master->gpio_data, 1);
/* todo: evaluate if clocks can be reduced */
+ local_irq_save(flags);
clock_zeros(master, FSI_INIT_CLOCKS);
+ local_irq_restore(flags);
}
static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
@@ -521,22 +680,37 @@ static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
- unsigned long flags;
int rc = -EBUSY;
if (link != 0)
return -ENODEV;
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (!master->external_mode) {
gpiod_set_value(master->gpio_enable, 1);
rc = 0;
}
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return rc;
}
+static int fsi_master_gpio_link_config(struct fsi_master *_master, int link,
+ u8 t_send_delay, u8 t_echo_delay)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
+
+ if (link != 0)
+ return -ENODEV;
+
+ mutex_lock(&master->cmd_lock);
+ master->t_send_delay = t_send_delay;
+ master->t_echo_delay = t_echo_delay;
+ mutex_unlock(&master->cmd_lock);
+
+ return 0;
+}
+
static ssize_t external_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -550,7 +724,7 @@ static ssize_t external_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_master_gpio *master = dev_get_drvdata(dev);
- unsigned long flags, val;
+ unsigned long val;
bool external_mode;
int err;
@@ -560,10 +734,10 @@ static ssize_t external_mode_store(struct device *dev,
external_mode = !!val;
- spin_lock_irqsave(&master->cmd_lock, flags);
+ mutex_lock(&master->cmd_lock);
if (external_mode == master->external_mode) {
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+ mutex_unlock(&master->cmd_lock);
return count;
}
@@ -572,7 +746,8 @@ static ssize_t external_mode_store(struct device *dev,
fsi_master_gpio_init_external(master);
else
fsi_master_gpio_init(master);
- spin_unlock_irqrestore(&master->cmd_lock, flags);
+
+ mutex_unlock(&master->cmd_lock);
fsi_master_rescan(&master->master);
@@ -582,31 +757,44 @@ static ssize_t external_mode_store(struct device *dev,
static DEVICE_ATTR(external_mode, 0664,
external_mode_show, external_mode_store);
+static void fsi_master_gpio_release(struct device *dev)
+{
+ struct fsi_master_gpio *master = to_fsi_master_gpio(dev_to_fsi_master(dev));
+
+ of_node_put(dev_of_node(master->dev));
+
+ kfree(master);
+}
+
static int fsi_master_gpio_probe(struct platform_device *pdev)
{
struct fsi_master_gpio *master;
struct gpio_desc *gpio;
int rc;
- master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->dev = &pdev->dev;
master->master.dev.parent = master->dev;
master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
+ master->master.dev.release = fsi_master_gpio_release;
+ master->last_addr = LAST_ADDR_INVALID;
gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get clock gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_clk = gpio;
gpio = devm_gpiod_get(&pdev->dev, "data", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get data gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_data = gpio;
@@ -614,24 +802,38 @@ static int fsi_master_gpio_probe(struct platform_device *pdev)
gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get trans gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_trans = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get enable gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_enable = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get mux gpio\n");
- return PTR_ERR(gpio);
+ rc = PTR_ERR(gpio);
+ goto err_free;
}
master->gpio_mux = gpio;
+ /*
+ * Check if the GPIO block is slow enough that no extra delays
+ * are necessary. This improves performance on ast2500 by
+ * an order of magnitude.
+ */
+ master->no_delays = device_property_present(&pdev->dev, "no-gpio-delays");
+
+ /* Default FSI command delays */
+ master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
+ master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
+
master->master.n_links = 1;
master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
master->master.read = fsi_master_gpio_read;
@@ -639,34 +841,37 @@ static int fsi_master_gpio_probe(struct platform_device *pdev)
master->master.term = fsi_master_gpio_term;
master->master.send_break = fsi_master_gpio_break;
master->master.link_enable = fsi_master_gpio_link_enable;
+ master->master.link_config = fsi_master_gpio_link_config;
platform_set_drvdata(pdev, master);
- spin_lock_init(&master->cmd_lock);
+ mutex_init(&master->cmd_lock);
fsi_master_gpio_init(master);
rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
if (rc)
- return rc;
+ goto err_free;
- return fsi_master_register(&master->master);
+ rc = fsi_master_register(&master->master);
+ if (rc) {
+ device_remove_file(&pdev->dev, &dev_attr_external_mode);
+ put_device(&master->master.dev);
+ return rc;
+ }
+ return 0;
+ err_free:
+ kfree(master);
+ return rc;
}
+
static int fsi_master_gpio_remove(struct platform_device *pdev)
{
struct fsi_master_gpio *master = platform_get_drvdata(pdev);
- devm_gpiod_put(&pdev->dev, master->gpio_clk);
- devm_gpiod_put(&pdev->dev, master->gpio_data);
- if (master->gpio_trans)
- devm_gpiod_put(&pdev->dev, master->gpio_trans);
- if (master->gpio_enable)
- devm_gpiod_put(&pdev->dev, master->gpio_enable);
- if (master->gpio_mux)
- devm_gpiod_put(&pdev->dev, master->gpio_mux);
- fsi_master_unregister(&master->master);
+ device_remove_file(&pdev->dev, &dev_attr_external_mode);
- of_node_put(master->master.dev.of_node);
+ fsi_master_unregister(&master->master);
return 0;
}
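The conversion from a single spinlock to mutex-plus-irq-windows follows one pattern throughout this file: the mutex orders whole commands, which may now sleep (msleep() pacing, DPOLL/EPOLL retries), while interrupts are disabled only around the short bit-banged bursts whose edge timing matters. A consolidated sketch using the driver's own names:

	mutex_lock(&master->cmd_lock);		/* may sleep: orders commands */

	local_irq_save(flags);			/* timing-critical window only */
	serial_out(master, &cmd);
	echo_delay(master);
	local_irq_restore(flags);

	rc = poll_for_response(master, slave, resp_len, resp);	/* may msleep */
	mutex_unlock(&master->cmd_lock);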
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index 5885fc4a1ef0..b3c1e9debcf2 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -122,7 +122,8 @@ static int hub_master_write(struct fsi_master *master, int link,
static int hub_master_break(struct fsi_master *master, int link)
{
- uint32_t addr, cmd;
+ uint32_t addr;
+ __be32 cmd;
addr = 0x4;
cmd = cpu_to_be32(0xc0de0000);
@@ -205,7 +206,7 @@ static int hub_master_init(struct fsi_master_hub *hub)
if (rc)
return rc;
- reg = ~0;
+ reg = cpu_to_be32(~0);
rc = fsi_device_write(dev, FSI_MSENP0, &reg, sizeof(reg));
if (rc)
return rc;
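Both hub fixes are endianness type corrections: fsi_device_write() puts the in-memory bytes on the wire unchanged, so register images must be built with cpu_to_be32() and carried in __be32 variables for sparse to check (for ~0 the bit pattern happens to be endian-invariant, so the annotation is the substantive change). Sketch:

	__be32 reg = cpu_to_be32(~0u);	/* typed big-endian register image */
	int rc;

	rc = fsi_device_write(dev, FSI_MSENP0, &reg, sizeof(reg));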
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index ee0b46086026..040a7d4cf717 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -18,7 +18,41 @@
#define DRIVERS_FSI_MASTER_H
#include <linux/device.h>
+#include <linux/mutex.h>
+/* Various protocol delays */
+#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
+#define FSI_SEND_DELAY_CLOCKS 16 /* Number clocks for send delay */
+#define FSI_PRE_BREAK_CLOCKS 50 /* Number clocks to prep for break */
+#define FSI_BREAK_CLOCKS 256 /* Number of clocks to issue break */
+#define FSI_POST_BREAK_CLOCKS 16000 /* Number clocks to set up cfam */
+#define FSI_INIT_CLOCKS 5000 /* Clock out any old data */
+#define FSI_MASTER_DPOLL_CLOCKS 50 /* < 21 will cause slave to hang */
+#define FSI_MASTER_EPOLL_CLOCKS 50 /* Number of clocks for E_POLL retry */
+
+/* Various retry maximums */
+#define FSI_CRC_ERR_RETRIES 10
+#define FSI_MASTER_MAX_BUSY 200
+#define FSI_MASTER_MTOE_COUNT 1000
+
+/* Command encodings */
+#define FSI_CMD_DPOLL 0x2
+#define FSI_CMD_EPOLL 0x3
+#define FSI_CMD_TERM 0x3f
+#define FSI_CMD_ABS_AR 0x4
+#define FSI_CMD_REL_AR 0x5
+#define FSI_CMD_SAME_AR 0x3 /* but only a 2-bit opcode... */
+
+/* Slave responses */
+#define FSI_RESP_ACK 0 /* Success */
+#define FSI_RESP_BUSY 1 /* Slave busy */
+#define FSI_RESP_ERRA 2 /* Any (misc) Error */
+#define FSI_RESP_ERRC 3 /* Slave reports master CRC error */
+
+/* Misc */
+#define FSI_CRC_SIZE 4
+
+/* fsi-master definition and flags */
#define FSI_MASTER_FLAG_SWCLOCK 0x1
struct fsi_master {
@@ -26,6 +60,7 @@ struct fsi_master {
int idx;
int n_links;
int flags;
+ struct mutex scan_lock;
int (*read)(struct fsi_master *, int link, uint8_t id,
uint32_t addr, void *val, size_t size);
int (*write)(struct fsi_master *, int link, uint8_t id,
@@ -33,6 +68,8 @@ struct fsi_master {
int (*term)(struct fsi_master *, int link, uint8_t id);
int (*send_break)(struct fsi_master *, int link);
int (*link_enable)(struct fsi_master *, int link);
+ int (*link_config)(struct fsi_master *, int link,
+ u8 t_send_delay, u8 t_echo_delay);
};
#define dev_to_fsi_master(d) container_of(d, struct fsi_master, dev)
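A master implementation wires these hooks roughly as below; every my_* name is hypothetical, and link_config is the new optional callback for per-link tSendDelay/tEchoDelay tuning:

	static int my_link_config(struct fsi_master *m, int link,
				  u8 t_send_delay, u8 t_echo_delay)
	{
		/* latch the per-link delays for subsequent commands */
		return 0;
	}

	static void my_wire_ops(struct fsi_master *m)
	{
		m->n_links	= 1;
		m->read		= my_read;
		m->write	= my_write;
		m->term		= my_term;
		m->send_break	= my_break;
		m->link_enable	= my_link_enable;
		m->link_config	= my_link_config;	/* optional */
	}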
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
new file mode 100644
index 000000000000..ae861342626e
--- /dev/null
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -0,0 +1,1066 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IBM Corporation 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsi.h>
+#include <linux/fsi-sbefifo.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+/*
+ * The SBEFIFO is a pipe-like FSI device for communicating with
+ * the self boot engine on POWER processors.
+ */
+
+#define DEVICE_NAME "sbefifo"
+#define FSI_ENGID_SBE 0x22
+
+/*
+ * Register layout
+ */
+
+/* Register banks */
+#define SBEFIFO_UP 0x00 /* FSI -> Host */
+#define SBEFIFO_DOWN 0x40 /* Host -> FSI */
+
+/* Per-bank registers */
+#define SBEFIFO_FIFO 0x00 /* The FIFO itself */
+#define SBEFIFO_STS 0x04 /* Status register */
+#define SBEFIFO_STS_PARITY_ERR 0x20000000
+#define SBEFIFO_STS_RESET_REQ 0x02000000
+#define SBEFIFO_STS_GOT_EOT 0x00800000
+#define SBEFIFO_STS_MAX_XFER_LIMIT 0x00400000
+#define SBEFIFO_STS_FULL 0x00200000
+#define SBEFIFO_STS_EMPTY 0x00100000
+#define SBEFIFO_STS_ECNT_MASK 0x000f0000
+#define SBEFIFO_STS_ECNT_SHIFT 16
+#define SBEFIFO_STS_VALID_MASK 0x0000ff00
+#define SBEFIFO_STS_VALID_SHIFT 8
+#define SBEFIFO_STS_EOT_MASK 0x000000ff
+#define SBEFIFO_STS_EOT_SHIFT 0
+#define SBEFIFO_EOT_RAISE 0x08 /* (Up only) Set End Of Transfer */
+#define SBEFIFO_REQ_RESET 0x0C /* (Up only) Reset Request */
+#define SBEFIFO_PERFORM_RESET 0x10 /* (Down only) Perform Reset */
+#define SBEFIFO_EOT_ACK 0x14 /* (Down only) Acknowledge EOT */
+#define SBEFIFO_DOWN_MAX 0x18 /* (Down only) Max transfer */
+
+/* CFAM GP Mailbox SelfBoot Message register */
+#define CFAM_GP_MBOX_SBM_ADDR 0x2824 /* Converted 0x2809 */
+
+#define CFAM_SBM_SBE_BOOTED 0x80000000
+#define CFAM_SBM_SBE_ASYNC_FFDC 0x40000000
+#define CFAM_SBM_SBE_STATE_MASK 0x00f00000
+#define CFAM_SBM_SBE_STATE_SHIFT 20
+
+enum sbe_state
+{
+ SBE_STATE_UNKNOWN = 0x0, // Unknown, initial state
+ SBE_STATE_IPLING = 0x1, // IPL'ing - autonomous mode (transient)
+ SBE_STATE_ISTEP = 0x2, // ISTEP - Running IPL by steps (transient)
+ SBE_STATE_MPIPL = 0x3, // MPIPL
+ SBE_STATE_RUNTIME = 0x4, // SBE Runtime
+ SBE_STATE_DMT = 0x5, // Dead Man Timer State (transient)
+ SBE_STATE_DUMP = 0x6, // Dumping
+ SBE_STATE_FAILURE = 0x7, // Internal SBE failure
+ SBE_STATE_QUIESCE = 0x8, // Final state - needs SBE reset to get out
+};
+
+/* FIFO depth */
+#define SBEFIFO_FIFO_DEPTH 8
+
+/* Helpers */
+#define sbefifo_empty(sts) ((sts) & SBEFIFO_STS_EMPTY)
+#define sbefifo_full(sts) ((sts) & SBEFIFO_STS_FULL)
+#define sbefifo_parity_err(sts) ((sts) & SBEFIFO_STS_PARITY_ERR)
+#define sbefifo_populated(sts) (((sts) & SBEFIFO_STS_ECNT_MASK) >> SBEFIFO_STS_ECNT_SHIFT)
+#define sbefifo_vacant(sts) (SBEFIFO_FIFO_DEPTH - sbefifo_populated(sts))
+#define sbefifo_eot_set(sts) (((sts) & SBEFIFO_STS_EOT_MASK) >> SBEFIFO_STS_EOT_SHIFT)
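Worked example of the status helpers (value hypothetical):

	u32 sts = 0x00050000;

	sbefifo_populated(sts);	/* (sts & ECNT_MASK) >> 16 == 5 entries */
	sbefifo_vacant(sts);	/* 8 - 5 == 3 free slots */
	sbefifo_empty(sts);	/* 0: EMPTY (0x00100000) clear */
	sbefifo_full(sts);	/* 0: FULL  (0x00200000) clear */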
+
+/* Reset request timeout in ms */
+#define SBEFIFO_RESET_TIMEOUT 10000
+
+/* Timeouts for commands in ms */
+#define SBEFIFO_TIMEOUT_START_CMD 10000
+#define SBEFIFO_TIMEOUT_IN_CMD 1000
+#define SBEFIFO_TIMEOUT_START_RSP 10000
+#define SBEFIFO_TIMEOUT_IN_RSP 1000
+
+/* Other constants */
+#define SBEFIFO_MAX_USER_CMD_LEN (0x100000 + PAGE_SIZE)
+#define SBEFIFO_RESET_MAGIC 0x52534554 /* "RSET" */
+
+struct sbefifo {
+ uint32_t magic;
+#define SBEFIFO_MAGIC 0x53424546 /* "SBEF" */
+ struct fsi_device *fsi_dev;
+ struct device dev;
+ struct cdev cdev;
+ struct mutex lock;
+ bool broken;
+ bool dead;
+ bool async_ffdc;
+};
+
+struct sbefifo_user {
+ struct sbefifo *sbefifo;
+ struct mutex file_lock;
+ void *cmd_page;
+ void *pending_cmd;
+ size_t pending_len;
+};
+
+static DEFINE_MUTEX(sbefifo_ffdc_mutex);
+
+static void __sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+ size_t ffdc_sz, bool internal)
+{
+ int pack = 0;
+#define FFDC_LSIZE 60
+ static char ffdc_line[FFDC_LSIZE];
+ char *p = ffdc_line;
+
+ while (ffdc_sz) {
+ u32 w0, w1, w2, i;
+ if (ffdc_sz < 3) {
+ dev_err(dev, "SBE invalid FFDC package size %zd\n", ffdc_sz);
+ return;
+ }
+ w0 = be32_to_cpu(*(ffdc++));
+ w1 = be32_to_cpu(*(ffdc++));
+ w2 = be32_to_cpu(*(ffdc++));
+ ffdc_sz -= 3;
+ if ((w0 >> 16) != 0xFFDC) {
+ dev_err(dev, "SBE invalid FFDC package signature %08x %08x %08x\n",
+ w0, w1, w2);
+ break;
+ }
+ w0 &= 0xffff;
+ if (w0 > ffdc_sz) {
+ dev_err(dev, "SBE FFDC package len %d words but only %zd remaining\n",
+ w0, ffdc_sz);
+ w0 = ffdc_sz;
+ break;
+ }
+ if (internal) {
+ dev_warn(dev, "+---- SBE FFDC package %d for async err -----+\n",
+ pack++);
+ } else {
+ dev_warn(dev, "+---- SBE FFDC package %d for cmd %02x:%02x -----+\n",
+ pack++, (w1 >> 8) & 0xff, w1 & 0xff);
+ }
+ dev_warn(dev, "| Response code: %08x |\n", w2);
+ dev_warn(dev, "|-------------------------------------------|\n");
+ for (i = 0; i < w0; i++) {
+ if ((i & 3) == 0) {
+ p = ffdc_line;
+ p += sprintf(p, "| %04x:", i << 4);
+ }
+ p += sprintf(p, " %08x", be32_to_cpu(*(ffdc++)));
+ ffdc_sz--;
+ if ((i & 3) == 3 || i == (w0 - 1)) {
+ while ((i & 3) < 3) {
+ p += sprintf(p, " ");
+ i++;
+ }
+ dev_warn(dev, "%s |\n", ffdc_line);
+ }
+ }
+ dev_warn(dev, "+-------------------------------------------+\n");
+ }
+}
+
+static void sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
+ size_t ffdc_sz, bool internal)
+{
+ mutex_lock(&sbefifo_ffdc_mutex);
+ __sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, internal);
+ mutex_unlock(&sbefifo_ffdc_mutex);
+}
+
+int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
+ size_t resp_len, size_t *data_len)
+{
+ u32 dh, s0, s1;
+ size_t ffdc_sz;
+
+ if (resp_len < 3) {
+ pr_debug("sbefifo: cmd %04x, response too small: %zd\n",
+ cmd, resp_len);
+ return -ENXIO;
+ }
+ dh = be32_to_cpu(response[resp_len - 1]);
+ if (dh > resp_len || dh < 3) {
+ dev_err(dev, "SBE cmd %02x:%02x status offset out of range: %d/%zd\n",
+ cmd >> 8, cmd & 0xff, dh, resp_len);
+ return -ENXIO;
+ }
+ s0 = be32_to_cpu(response[resp_len - dh]);
+ s1 = be32_to_cpu(response[resp_len - dh + 1]);
+ if (((s0 >> 16) != 0xC0DE) || ((s0 & 0xffff) != cmd)) {
+ dev_err(dev, "SBE cmd %02x:%02x, status signature invalid: 0x%08x 0x%08x\n",
+ cmd >> 8, cmd & 0xff, s0, s1);
+ return -ENXIO;
+ }
+ if (s1 != 0) {
+ ffdc_sz = dh - 3;
+ dev_warn(dev, "SBE error cmd %02x:%02x status=%04x:%04x\n",
+ cmd >> 8, cmd & 0xff, s1 >> 16, s1 & 0xffff);
+ if (ffdc_sz)
+ sbefifo_dump_ffdc(dev, &response[resp_len - dh + 2],
+ ffdc_sz, false);
+ }
+ if (data_len)
+ *data_len = resp_len - dh;
+
+ /*
+ * Primary status words don't have the top bit set, so they can't be
+ * confused with Linux negative error codes; return the status word whole.
+ */
+ return s1;
+}
+EXPORT_SYMBOL_GPL(sbefifo_parse_status);
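The status block parsed here hangs off the end of the response. For a successful command returning three data words and no FFDC (values hypothetical), the word layout is:

	word 0..2: response data	(data_len = resp_len - dh = 3)
	word 3:    0xC0DExxxx		s0: magic | echoed command
	word 4:    0x00000000		s1: status, 0 = success
	word 5:    0x00000003		dh: distance from the end back to s0

so dh = 3 covers s0, s1 and the dh word itself, and any FFDC words would sit between s1 and the dh word.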
+
+static int sbefifo_regr(struct sbefifo *sbefifo, int reg, u32 *word)
+{
+ __be32 raw_word;
+ int rc;
+
+ rc = fsi_device_read(sbefifo->fsi_dev, reg, &raw_word,
+ sizeof(raw_word));
+ if (rc)
+ return rc;
+
+ *word = be32_to_cpu(raw_word);
+
+ return 0;
+}
+
+static int sbefifo_regw(struct sbefifo *sbefifo, int reg, u32 word)
+{
+ __be32 raw_word = cpu_to_be32(word);
+
+ return fsi_device_write(sbefifo->fsi_dev, reg, &raw_word,
+ sizeof(raw_word));
+}
+
+static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
+{
+ __be32 raw_word;
+ u32 sbm;
+ int rc;
+
+ rc = fsi_slave_read(sbefifo->fsi_dev->slave, CFAM_GP_MBOX_SBM_ADDR,
+ &raw_word, sizeof(raw_word));
+ if (rc)
+ return rc;
+ sbm = be32_to_cpu(raw_word);
+
+ /* Has the SBE booted at all? */
+ if (!(sbm & CFAM_SBM_SBE_BOOTED))
+ return -ESHUTDOWN;
+
+ /* Check its state */
+ switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
+ case SBE_STATE_UNKNOWN:
+ return -ESHUTDOWN;
+ case SBE_STATE_IPLING:
+ case SBE_STATE_ISTEP:
+ case SBE_STATE_MPIPL:
+ case SBE_STATE_DMT:
+ return -EBUSY;
+ case SBE_STATE_RUNTIME:
+ case SBE_STATE_DUMP: /* Not sure about that one */
+ break;
+ case SBE_STATE_FAILURE:
+ case SBE_STATE_QUIESCE:
+ return -ESHUTDOWN;
+ }
+
+ /* Is there async FFDC available? If so, remember it */
+ if (sbm & CFAM_SBM_SBE_ASYNC_FFDC)
+ sbefifo->async_ffdc = true;
+
+ return 0;
+}
+
+/* Don't flip endianness of data to/from FIFO, just pass through. */
+static int sbefifo_down_read(struct sbefifo *sbefifo, __be32 *word)
+{
+ return fsi_device_read(sbefifo->fsi_dev, SBEFIFO_DOWN, word,
+ sizeof(*word));
+}
+
+static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
+{
+ return fsi_device_write(sbefifo->fsi_dev, SBEFIFO_UP, &word,
+ sizeof(word));
+}
+
+static int sbefifo_request_reset(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 status, timeout;
+ int rc;
+
+ dev_dbg(dev, "Requesting FIFO reset\n");
+
+ /* Mark broken first, will be cleared if reset succeeds */
+ sbefifo->broken = true;
+
+ /* Send reset request */
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_REQ_RESET, 1);
+ if (rc) {
+ dev_err(dev, "Sending reset request failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Wait for it to complete */
+ for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
+ rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
+ if (rc) {
+ dev_err(dev, "Failed to read UP fifo status during reset"
+ " , rc=%d\n", rc);
+ return rc;
+ }
+
+ if (!(status & SBEFIFO_STS_RESET_REQ)) {
+ dev_dbg(dev, "FIFO reset done\n");
+ sbefifo->broken = false;
+ return 0;
+ }
+
+ msleep(1);
+ }
+ dev_err(dev, "FIFO reset timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 up_status, down_status;
+ bool need_reset = false;
+ int rc;
+
+ rc = sbefifo_check_sbe_state(sbefifo);
+ if (rc) {
+ dev_dbg(dev, "SBE state=%d\n", rc);
+ return rc;
+ }
+
+ /* If broken, we don't need to look at status, go straight to reset */
+ if (sbefifo->broken)
+ goto do_reset;
+
+ rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &up_status);
+ if (rc) {
+ dev_err(dev, "Cleanup: Reading UP status failed, rc=%d\n", rc);
+
+ /* Will try reset again on next attempt at using it */
+ sbefifo->broken = true;
+ return rc;
+ }
+
+ rc = sbefifo_regr(sbefifo, SBEFIFO_DOWN | SBEFIFO_STS, &down_status);
+ if (rc) {
+ dev_err(dev, "Cleanup: Reading DOWN status failed, rc=%d\n", rc);
+
+ /* Will try reset again on next attempt at using it */
+ sbefifo->broken = true;
+ return rc;
+ }
+
+ /* Does the FIFO already contain a reset request from the SBE? */
+ if (down_status & SBEFIFO_STS_RESET_REQ) {
+ dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
+ if (rc) {
+ sbefifo->broken = true;
+ dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
+ return rc;
+ }
+ sbefifo->broken = false;
+ return 0;
+ }
+
+ /* Parity error on either FIFO? */
+ if ((up_status | down_status) & SBEFIFO_STS_PARITY_ERR)
+ need_reset = true;
+
+ /* Either FIFO not empty? */
+ if (!((up_status & down_status) & SBEFIFO_STS_EMPTY))
+ need_reset = true;
+
+ if (!need_reset)
+ return 0;
+
+ dev_info(dev, "Cleanup: FIFO not clean (up=0x%08x down=0x%08x)\n",
+ up_status, down_status);
+
+ do_reset:
+
+ /* Mark broken, will be cleared if/when reset succeeds */
+ return sbefifo_request_reset(sbefifo);
+}
+
+static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
+ u32 *status, unsigned long timeout)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ unsigned long end_time;
+ bool ready = false;
+ u32 addr, sts = 0;
+ int rc;
+
+ dev_vdbg(dev, "Wait on %s fifo...\n", up ? "up" : "down");
+
+ addr = (up ? SBEFIFO_UP : SBEFIFO_DOWN) | SBEFIFO_STS;
+
+ end_time = jiffies + timeout;
+ while (!time_after(jiffies, end_time)) {
+ cond_resched();
+ rc = sbefifo_regr(sbefifo, addr, &sts);
+ if (rc < 0) {
+ dev_err(dev, "FSI error %d reading status register\n", rc);
+ return rc;
+ }
+ if (!up && sbefifo_parity_err(sts)) {
+ dev_err(dev, "Parity error in DOWN FIFO\n");
+ return -ENXIO;
+ }
+ ready = !(up ? sbefifo_full(sts) : sbefifo_empty(sts));
+ if (ready)
+ break;
+ }
+ if (!ready) {
+ dev_err(dev, "%s FIFO Timeout ! status=%08x\n", up ? "UP" : "DOWN", sts);
+ return -ETIMEDOUT;
+ }
+ dev_vdbg(dev, "End of wait status: %08x\n", sts);
+
+ *status = sts;
+
+ return 0;
+}
+
+static int sbefifo_send_command(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ size_t len, chunk, vacant = 0, remaining = cmd_len;
+ unsigned long timeout;
+ u32 status;
+ int rc;
+
+ dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n",
+ cmd_len, be32_to_cpu(command[1]));
+
+ /* As long as there's something to send */
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
+ while (remaining) {
+ /* Wait for room in the FIFO */
+ rc = sbefifo_wait(sbefifo, true, &status, timeout);
+ if (rc < 0)
+ return rc;
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_CMD);
+
+ vacant = sbefifo_vacant(status);
+ len = chunk = min(vacant, remaining);
+
+ dev_vdbg(dev, " status=%08x vacant=%zd chunk=%zd\n",
+ status, vacant, chunk);
+
+ /* Write as much as we can */
+ while (len--) {
+ rc = sbefifo_up_write(sbefifo, *(command++));
+ if (rc) {
+ dev_err(dev, "FSI error %d writing UP FIFO\n", rc);
+ return rc;
+ }
+ }
+ remaining -= chunk;
+ vacant -= chunk;
+ }
+
+ /* If there's no room left, wait for space before writing the EOT */
+ if (!vacant) {
+ rc = sbefifo_wait(sbefifo, true, &status, timeout);
+ if (rc)
+ return rc;
+ }
+
+ /* Send an EOT */
+ rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_EOT_RAISE, 0);
+ if (rc)
+ dev_err(dev, "FSI error %d writing EOT\n", rc);
+ return rc;
+}
+
+static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *response)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ u32 status, eot_set;
+ unsigned long timeout;
+ bool overflow = false;
+ __be32 data;
+ size_t len;
+ int rc;
+
+ dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
+
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_RSP);
+ for (;;) {
+ /* Grab FIFO status (this will handle parity errors) */
+ rc = sbefifo_wait(sbefifo, false, &status, timeout);
+ if (rc < 0)
+ return rc;
+ timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);
+
+ /* Decode status */
+ len = sbefifo_populated(status);
+ eot_set = sbefifo_eot_set(status);
+
+ dev_vdbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
+
+ /* Go through the chunk */
+ while (len--) {
+ /* Read the data */
+ rc = sbefifo_down_read(sbefifo, &data);
+ if (rc < 0)
+ return rc;
+
+ /* Was it an EOT? */
+ if (eot_set & 0x80) {
+ /*
+ * There should be nothing else in the FIFO,
+ * if there is, mark broken, this will force
+ * a reset on next use, but don't fail the
+ * command.
+ */
+ if (len) {
+ dev_warn(dev, "FIFO read hit"
+ " EOT with still %zd data\n",
+ len);
+ sbefifo->broken = true;
+ }
+
+ /* We are done */
+ rc = sbefifo_regw(sbefifo,
+ SBEFIFO_DOWN | SBEFIFO_EOT_ACK, 0);
+
+ /*
+ * If that write fails, still complete the request but mark
+ * the FIFO as broken so it gets reset on the next use (not
+ * much else we can do here).
+ */
+ if (rc) {
+ dev_err(dev, "FSI error %d ack'ing EOT\n", rc);
+ sbefifo->broken = true;
+ }
+
+ /* Tell whether we overflowed */
+ return overflow ? -EOVERFLOW : 0;
+ }
+
+ /* Store it if there is room */
+ if (iov_iter_count(response) >= sizeof(__be32)) {
+ if (copy_to_iter(&data, sizeof(__be32), response) < sizeof(__be32))
+ return -EFAULT;
+ } else {
+ dev_vdbg(dev, "Response overflowed !\n");
+
+ overflow = true;
+ }
+
+ /* Next EOT bit */
+ eot_set <<= 1;
+ }
+ }
+ /* Shouldn't happen */
+ return -EIO;
+}
+
+static int sbefifo_do_command(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len,
+ struct iov_iter *response)
+{
+ /* Try sending the command */
+ int rc = sbefifo_send_command(sbefifo, command, cmd_len);
+ if (rc)
+ return rc;
+
+ /* Now, get the response */
+ return sbefifo_read_response(sbefifo, response);
+}
+
+static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ struct iov_iter ffdc_iter;
+ struct kvec ffdc_iov;
+ __be32 *ffdc;
+ size_t ffdc_sz;
+ __be32 cmd[2];
+ int rc;
+
+ sbefifo->async_ffdc = false;
+ ffdc = vmalloc(SBEFIFO_MAX_FFDC_SIZE);
+ if (!ffdc) {
+ dev_err(dev, "Failed to allocate SBE FFDC buffer\n");
+ return;
+ }
+ ffdc_iov.iov_base = ffdc;
+ ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
+ iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+ cmd[0] = cpu_to_be32(2);
+ cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
+ rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
+ if (rc != 0) {
+ dev_err(dev, "Error %d retrieving SBE FFDC\n", rc);
+ goto bail;
+ }
+ ffdc_sz = SBEFIFO_MAX_FFDC_SIZE - iov_iter_count(&ffdc_iter);
+ ffdc_sz /= sizeof(__be32);
+ rc = sbefifo_parse_status(dev, SBEFIFO_CMD_GET_SBE_FFDC, ffdc,
+ ffdc_sz, &ffdc_sz);
+ if (rc != 0) {
+ dev_err(dev, "Error %d decoding SBE FFDC\n", rc);
+ goto bail;
+ }
+ if (ffdc_sz > 0)
+ sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, true);
+ bail:
+ vfree(ffdc);
+}
+
+static int __sbefifo_submit(struct sbefifo *sbefifo,
+ const __be32 *command, size_t cmd_len,
+ struct iov_iter *response)
+{
+ struct device *dev = &sbefifo->fsi_dev->dev;
+ int rc;
+
+ if (sbefifo->dead)
+ return -ENODEV;
+
+ if (cmd_len < 2 || be32_to_cpu(command[0]) != cmd_len) {
+ dev_vdbg(dev, "Invalid command len %zd (header: %d)\n",
+ cmd_len, be32_to_cpu(command[0]));
+ return -EINVAL;
+ }
+
+ /* First ensure the HW is in a clean state */
+ rc = sbefifo_cleanup_hw(sbefifo);
+ if (rc)
+ return rc;
+
+ /* Look for async FFDC first if any */
+ if (sbefifo->async_ffdc)
+ sbefifo_collect_async_ffdc(sbefifo);
+
+ rc = sbefifo_do_command(sbefifo, command, cmd_len, response);
+ if (rc != 0 && rc != -EOVERFLOW)
+ goto fail;
+ return rc;
+ fail:
+ /*
+ * On failure, attempt a reset. Ignore the result, it will mark
+ * the fifo broken if the reset fails
+ */
+ sbefifo_request_reset(sbefifo);
+
+ /* Return original error */
+ return rc;
+}
+
+/**
+ * sbefifo_submit() - Submit an SBE FIFO command and receive the response
+ * @dev: The sbefifo device
+ * @command: The raw command data
+ * @cmd_len: The command size (in 32-bit words)
+ * @response: The output response buffer
+ * @resp_len: In: Response buffer size, Out: Response size
+ *
+ * This will perform the entire operation. If the response buffer
+ * overflows, returns -EOVERFLOW.
+ */
+int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+ __be32 *response, size_t *resp_len)
+{
+ struct sbefifo *sbefifo;
+ struct iov_iter resp_iter;
+ struct kvec resp_iov;
+ size_t rbytes;
+ int rc;
+
+ if (!dev)
+ return -ENODEV;
+ sbefifo = dev_get_drvdata(dev);
+ if (!sbefifo)
+ return -ENODEV;
+ if (WARN_ON_ONCE(sbefifo->magic != SBEFIFO_MAGIC))
+ return -ENODEV;
+ if (!resp_len || !command || !response)
+ return -EINVAL;
+
+ /* Prepare iov iterator */
+ rbytes = (*resp_len) * sizeof(__be32);
+ resp_iov.iov_base = response;
+ resp_iov.iov_len = rbytes;
+ iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes);
+
+ /* Perform the command */
+ mutex_lock(&sbefifo->lock);
+ rc = __sbefifo_submit(sbefifo, command, cmd_len, &resp_iter);
+ mutex_unlock(&sbefifo->lock);
+
+ /* Extract the response length */
+ rbytes -= iov_iter_count(&resp_iter);
+ *resp_len = rbytes / sizeof(__be32);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sbefifo_submit);
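A hedged sketch of an in-kernel caller; sbefifo_dev and the 0xA801 opcode are illustrative, not defined by this driver:

	__be32 cmd[2], resp[32];
	size_t resp_len = ARRAY_SIZE(resp);	/* in: words available, out: words used */
	int rc;

	cmd[0] = cpu_to_be32(2);	/* total length in words, incl. this header */
	cmd[1] = cpu_to_be32(0xA801);	/* hypothetical SBE opcode */
	rc = sbefifo_submit(sbefifo_dev, cmd, 2, resp, &resp_len);
	if (!rc)
		rc = sbefifo_parse_status(sbefifo_dev, 0xA801, resp, resp_len,
					  &resp_len);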
+
+/*
+ * Char device interface
+ */
+
+static void sbefifo_release_command(struct sbefifo_user *user)
+{
+ if (is_vmalloc_addr(user->pending_cmd))
+ vfree(user->pending_cmd);
+ user->pending_cmd = NULL;
+ user->pending_len = 0;
+}
+
+static int sbefifo_user_open(struct inode *inode, struct file *file)
+{
+ struct sbefifo *sbefifo = container_of(inode->i_cdev, struct sbefifo, cdev);
+ struct sbefifo_user *user;
+
+ user = kzalloc(sizeof(struct sbefifo_user), GFP_KERNEL);
+ if (!user)
+ return -ENOMEM;
+
+ file->private_data = user;
+ user->sbefifo = sbefifo;
+ user->cmd_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!user->cmd_page) {
+ kfree(user);
+ return -ENOMEM;
+ }
+ mutex_init(&user->file_lock);
+
+ return 0;
+}
+
+static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct sbefifo_user *user = file->private_data;
+ struct sbefifo *sbefifo;
+ struct iov_iter resp_iter;
+ struct iovec resp_iov;
+ size_t cmd_len;
+ int rc;
+
+ if (!user)
+ return -EINVAL;
+ sbefifo = user->sbefifo;
+ if (len & 3)
+ return -EINVAL;
+
+ mutex_lock(&user->file_lock);
+
+ /* Cronus relies on -EAGAIN after a short read */
+ if (user->pending_len == 0) {
+ rc = -EAGAIN;
+ goto bail;
+ }
+ if (user->pending_len < 8) {
+ rc = -EINVAL;
+ goto bail;
+ }
+ cmd_len = user->pending_len >> 2;
+
+ /* Prepare iov iterator */
+ resp_iov.iov_base = buf;
+ resp_iov.iov_len = len;
+ iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
+
+ /* Perform the command */
+ mutex_lock(&sbefifo->lock);
+ rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
+ mutex_unlock(&sbefifo->lock);
+ if (rc < 0)
+ goto bail;
+
+ /* Extract the response length */
+ rc = len - iov_iter_count(&resp_iter);
+ bail:
+ sbefifo_release_command(user);
+ mutex_unlock(&user->file_lock);
+ return rc;
+}
+
+static ssize_t sbefifo_user_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *offset)
+{
+ struct sbefifo_user *user = file->private_data;
+ struct sbefifo *sbefifo;
+ int rc = len;
+
+ if (!user)
+ return -EINVAL;
+ sbefifo = user->sbefifo;
+ if (len > SBEFIFO_MAX_USER_CMD_LEN)
+ return -EINVAL;
+ if (len & 3)
+ return -EINVAL;
+
+ mutex_lock(&user->file_lock);
+
+ /* Can we use the pre-allocated buffer? If not, allocate */
+ if (len <= PAGE_SIZE)
+ user->pending_cmd = user->cmd_page;
+ else
+ user->pending_cmd = vmalloc(len);
+ if (!user->pending_cmd) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+
+ /* Copy the command into the staging buffer */
+ if (copy_from_user(user->pending_cmd, buf, len)) {
+ rc = -EFAULT;
+ goto bail;
+ }
+
+ /* Check for the magic reset command */
+ if (len == 4 && be32_to_cpu(*(__be32 *)user->pending_cmd) ==
+ SBEFIFO_RESET_MAGIC) {
+
+ /* Clear out any pending command */
+ user->pending_len = 0;
+
+ /* Trigger reset request */
+ mutex_lock(&sbefifo->lock);
+ rc = sbefifo_request_reset(user->sbefifo);
+ mutex_unlock(&sbefifo->lock);
+ if (rc == 0)
+ rc = 4;
+ goto bail;
+ }
+
+ /* Update the staging buffer size */
+ user->pending_len = len;
+ bail:
+ if (!user->pending_len)
+ sbefifo_release_command(user);
+
+ mutex_unlock(&user->file_lock);
+
+ /* And that's it, we'll issue the command on a read */
+ return rc;
+}
+
+static int sbefifo_user_release(struct inode *inode, struct file *file)
+{
+ struct sbefifo_user *user = file->private_data;
+
+ if (!user)
+ return -EINVAL;
+
+ sbefifo_release_command(user);
+ free_page((unsigned long)user->cmd_page);
+ kfree(user);
+
+ return 0;
+}
+
+static const struct file_operations sbefifo_fops = {
+ .owner = THIS_MODULE,
+ .open = sbefifo_user_open,
+ .read = sbefifo_user_read,
+ .write = sbefifo_user_write,
+ .release = sbefifo_user_release,
+};
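From userspace the protocol is write-then-read: write() stages a command, read() issues it and returns the response. A sketch (device path and opcode are illustrative):

	#include <endian.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int fd = open("/dev/sbefifo1", O_RDWR);
	uint32_t cmd[2] = { htobe32(2), htobe32(0xA801) };	/* hypothetical */
	uint32_t resp[1024];
	ssize_t n;

	write(fd, cmd, sizeof(cmd));		/* stage the command */
	n = read(fd, resp, sizeof(resp));	/* run it and collect the response;
						 * a read with nothing staged
						 * returns -EAGAIN (Cronus uses this) */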
+
+static void sbefifo_free(struct device *dev)
+{
+ struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);
+
+ put_device(&sbefifo->fsi_dev->dev);
+ kfree(sbefifo);
+}
+
+/*
+ * Probe/remove
+ */
+
+static int sbefifo_probe(struct device *dev)
+{
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct sbefifo *sbefifo;
+ struct device_node *np;
+ struct platform_device *child;
+ char child_name[32];
+ int rc, didx, child_idx = 0;
+
+ dev_dbg(dev, "Found sbefifo device\n");
+
+ sbefifo = kzalloc(sizeof(*sbefifo), GFP_KERNEL);
+ if (!sbefifo)
+ return -ENOMEM;
+
+ /* Grab a reference to the device (parent of our cdev); dropped in sbefifo_free() */
+ if (!get_device(dev)) {
+ kfree(sbefifo);
+ return -ENODEV;
+ }
+
+ sbefifo->magic = SBEFIFO_MAGIC;
+ sbefifo->fsi_dev = fsi_dev;
+ dev_set_drvdata(dev, sbefifo);
+ mutex_init(&sbefifo->lock);
+
+ /*
+ * Try cleaning up the FIFO. If this fails, we still register the
+ * driver and will try cleaning things up again on the next access.
+ */
+ rc = sbefifo_cleanup_hw(sbefifo);
+ if (rc && rc != -ESHUTDOWN)
+ dev_err(dev, "Initial HW cleanup failed, will retry later\n");
+
+ /* Create chardev for userspace access */
+ sbefifo->dev.type = &fsi_cdev_type;
+ sbefifo->dev.parent = dev;
+ sbefifo->dev.release = sbefifo_free;
+ device_initialize(&sbefifo->dev);
+
+ /* Allocate a minor in the FSI space */
+ rc = fsi_get_new_minor(fsi_dev, fsi_dev_sbefifo, &sbefifo->dev.devt, &didx);
+ if (rc)
+ goto err;
+
+ dev_set_name(&sbefifo->dev, "sbefifo%d", didx);
+ cdev_init(&sbefifo->cdev, &sbefifo_fops);
+ rc = cdev_device_add(&sbefifo->cdev, &sbefifo->dev);
+ if (rc) {
+ dev_err(dev, "Error %d creating char device %s\n",
+ rc, dev_name(&sbefifo->dev));
+ goto err_free_minor;
+ }
+
+ /* Create platform devs for dts child nodes (occ, etc) */
+ for_each_available_child_of_node(dev->of_node, np) {
+ snprintf(child_name, sizeof(child_name), "%s-dev%d",
+ dev_name(&sbefifo->dev), child_idx++);
+ child = of_platform_device_create(np, child_name, dev);
+ if (!child)
+ dev_warn(dev, "failed to create child %s dev\n",
+ child_name);
+ }
+
+ return 0;
+ err_free_minor:
+ fsi_free_minor(sbefifo->dev.devt);
+ err:
+ put_device(&sbefifo->dev);
+ return rc;
+}
+
+static int sbefifo_unregister_child(struct device *dev, void *data)
+{
+ struct platform_device *child = to_platform_device(dev);
+
+ of_device_unregister(child);
+ if (dev->of_node)
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+
+ return 0;
+}
+
+static int sbefifo_remove(struct device *dev)
+{
+ struct sbefifo *sbefifo = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "Removing sbefifo device...\n");
+
+ mutex_lock(&sbefifo->lock);
+ sbefifo->dead = true;
+ mutex_unlock(&sbefifo->lock);
+
+ cdev_device_del(&sbefifo->cdev, &sbefifo->dev);
+ fsi_free_minor(sbefifo->dev.devt);
+ device_for_each_child(dev, NULL, sbefifo_unregister_child);
+ put_device(&sbefifo->dev);
+
+ return 0;
+}
+
+static struct fsi_device_id sbefifo_ids[] = {
+ {
+ .engine_type = FSI_ENGID_SBE,
+ .version = FSI_VERSION_ANY,
+ },
+ { 0 }
+};
+
+static struct fsi_driver sbefifo_drv = {
+ .id_table = sbefifo_ids,
+ .drv = {
+ .name = DEVICE_NAME,
+ .bus = &fsi_bus_type,
+ .probe = sbefifo_probe,
+ .remove = sbefifo_remove,
+ }
+};
+
+static int sbefifo_init(void)
+{
+ return fsi_driver_register(&sbefifo_drv);
+}
+
+static void sbefifo_exit(void)
+{
+ fsi_driver_unregister(&sbefifo_drv);
+}
+
+module_init(sbefifo_init);
+module_exit(sbefifo_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brad Bishop <bradleyb@fuzziesquirrel.com>");
+MODULE_AUTHOR("Eddie James <eajames@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Linux device interface to the POWER Self Boot Engine");
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index e13353a2fd7c..df94021dd9d1 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -20,42 +20,73 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <linux/miscdevice.h>
+#include <linux/cdev.h>
#include <linux/list.h>
-#include <linux/idr.h>
-#define FSI_ENGID_SCOM 0x5
+#include <uapi/linux/fsi.h>
-#define SCOM_FSI2PIB_DELAY 50
+#define FSI_ENGID_SCOM 0x5
/* SCOM engine register set */
#define SCOM_DATA0_REG 0x00
#define SCOM_DATA1_REG 0x04
#define SCOM_CMD_REG 0x08
-#define SCOM_RESET_REG 0x1C
+#define SCOM_FSI2PIB_RESET_REG 0x18
+#define SCOM_STATUS_REG 0x1C /* Read */
+#define SCOM_PIB_RESET_REG 0x1C /* Write */
-#define SCOM_RESET_CMD 0x80000000
+/* Command register */
#define SCOM_WRITE_CMD 0x80000000
+#define SCOM_READ_CMD 0x00000000
+
+/* Status register bits */
+#define SCOM_STATUS_ERR_SUMMARY 0x80000000
+#define SCOM_STATUS_PROTECTION 0x01000000
+#define SCOM_STATUS_PARITY 0x04000000
+#define SCOM_STATUS_PIB_ABORT 0x00100000
+#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
+#define SCOM_STATUS_PIB_RESP_SHIFT 12
+
+#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \
+ SCOM_STATUS_PROTECTION | \
+ SCOM_STATUS_PARITY | \
+ SCOM_STATUS_PIB_ABORT | \
+ SCOM_STATUS_PIB_RESP_MASK)
+/* SCOM address encodings */
+#define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
+#define XSCOM_ADDR_INF_FORM1 BIT_ULL(60)
+
+/* SCOM indirect stuff */
+#define XSCOM_ADDR_DIRECT_PART 0x7fffffffull
+#define XSCOM_ADDR_INDIRECT_PART 0x000fffff00000000ull
+#define XSCOM_DATA_IND_READ BIT_ULL(63)
+#define XSCOM_DATA_IND_COMPLETE BIT_ULL(31)
+#define XSCOM_DATA_IND_ERR_MASK 0x70000000ull
+#define XSCOM_DATA_IND_ERR_SHIFT 28
+#define XSCOM_DATA_IND_DATA 0x0000ffffull
+#define XSCOM_DATA_IND_FORM1_DATA 0x000fffffffffffffull
+#define XSCOM_ADDR_FORM1_LOW 0x000ffffffffull
+#define XSCOM_ADDR_FORM1_HI 0xfff00000000ull
+#define XSCOM_ADDR_FORM1_HI_SHIFT 20
+
+/* Retries */
+#define SCOM_MAX_RETRIES 100 /* Retries on busy */
+#define SCOM_MAX_IND_RETRIES 10 /* Retries indirect not ready */
struct scom_device {
struct list_head link;
struct fsi_device *fsi_dev;
- struct miscdevice mdev;
- char name[32];
- int idx;
+ struct device dev;
+ struct cdev cdev;
+ struct mutex lock;
+ bool dead;
};
-#define to_scom_dev(x) container_of((x), struct scom_device, mdev)
-
-static struct list_head scom_devices;
-
-static DEFINE_IDA(scom_ida);
-
-static int put_scom(struct scom_device *scom_dev, uint64_t value,
- uint32_t addr)
+static int __put_scom(struct scom_device *scom_dev, uint64_t value,
+ uint32_t addr, uint32_t *status)
{
+ __be32 data, raw_status;
int rc;
- uint32_t data;
data = cpu_to_be32((value >> 32) & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
@@ -70,53 +101,286 @@ static int put_scom(struct scom_device *scom_dev, uint64_t value,
return rc;
data = cpu_to_be32(SCOM_WRITE_CMD | addr);
- return fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
+ rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
+ if (rc)
+ return rc;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
+ *status = be32_to_cpu(raw_status);
+
+ return 0;
}
-static int get_scom(struct scom_device *scom_dev, uint64_t *value,
- uint32_t addr)
+static int __get_scom(struct scom_device *scom_dev, uint64_t *value,
+ uint32_t addr, uint32_t *status)
{
- uint32_t result, data;
+ __be32 data, raw_status;
int rc;
+
*value = 0ULL;
- data = cpu_to_be32(addr);
+ data = cpu_to_be32(SCOM_READ_CMD | addr);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
+ sizeof(uint32_t));
+ if (rc)
+ return rc;
- rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &result,
+ /*
+ * Read the data registers even on error, so we don't have
+ * to interpret the status register here.
+ */
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
-
- *value |= (uint64_t)cpu_to_be32(result) << 32;
- rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &result,
+ *value |= (uint64_t)be32_to_cpu(data) << 32;
+ rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
+ *value |= be32_to_cpu(data);
+ *status = be32_to_cpu(raw_status);
- *value |= cpu_to_be32(result);
+ return rc;
+}
+
+static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+ int rc, retries, err = 0;
+
+ if (value & ~XSCOM_DATA_IND_DATA)
+ return -EINVAL;
+
+ ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+ ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | value;
+ rc = __put_scom(scom, ind_data, ind_addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+ if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+ return 0;
+
+ msleep(1);
+ }
+ return rc;
+}
+
+static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+
+ if (value & ~XSCOM_DATA_IND_FORM1_DATA)
+ return -EINVAL;
+ ind_addr = addr & XSCOM_ADDR_FORM1_LOW;
+ ind_data = value | (addr & XSCOM_ADDR_FORM1_HI) << XSCOM_ADDR_FORM1_HI_SHIFT;
+ return __put_scom(scom, ind_data, ind_addr, status);
+}
+
+static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
+ uint64_t addr, uint32_t *status)
+{
+ uint64_t ind_data, ind_addr;
+ int rc, retries, err = 0;
+
+ ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
+ ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ;
+ rc = __put_scom(scom, ind_data, ind_addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ for (retries = 0; retries < SCOM_MAX_IND_RETRIES; retries++) {
+ rc = __get_scom(scom, &ind_data, addr, status);
+ if (rc || (*status & SCOM_STATUS_ANY_ERR))
+ return rc;
+
+ err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
+ *status = err << SCOM_STATUS_PIB_RESP_SHIFT;
+ *value = ind_data & XSCOM_DATA_IND_DATA;
+
+ if ((ind_data & XSCOM_DATA_IND_COMPLETE) || (err != SCOM_PIB_BLOCKED))
+ return 0;
+
+ msleep(1);
+ }
+ return rc;
+}
+
+static int raw_put_scom(struct scom_device *scom, uint64_t value,
+ uint64_t addr, uint32_t *status)
+{
+ if (addr & XSCOM_ADDR_IND_FLAG) {
+ if (addr & XSCOM_ADDR_INF_FORM1)
+ return put_indirect_scom_form1(scom, value, addr, status);
+ else
+ return put_indirect_scom_form0(scom, value, addr, status);
+ } else
+ return __put_scom(scom, value, addr, status);
+}
+
+static int raw_get_scom(struct scom_device *scom, uint64_t *value,
+ uint64_t addr, uint32_t *status)
+{
+ if (addr & XSCOM_ADDR_IND_FLAG) {
+ if (addr & XSCOM_ADDR_INF_FORM1)
+ return -ENXIO;
+ return get_indirect_scom_form0(scom, value, addr, status);
+ } else
+ return __get_scom(scom, value, addr, status);
+}
+
+static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
+{
+ uint32_t dummy = -1;
+
+ if (status & SCOM_STATUS_PROTECTION)
+ return -EPERM;
+ if (status & SCOM_STATUS_PARITY) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return -EIO;
+ }
+ /* Return -EBUSY on PIB abort to force a retry */
+ if (status & SCOM_STATUS_PIB_ABORT)
+ return -EBUSY;
+ if (status & SCOM_STATUS_ERR_SUMMARY) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return -EIO;
+ }
return 0;
}
+static int handle_pib_status(struct scom_device *scom, uint8_t status)
+{
+ uint32_t dummy = -1;
+
+ if (status == SCOM_PIB_SUCCESS)
+ return 0;
+ if (status == SCOM_PIB_BLOCKED)
+ return -EBUSY;
+
+ /* Reset the bridge */
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+
+	switch (status) {
+ case SCOM_PIB_OFFLINE:
+ return -ENODEV;
+ case SCOM_PIB_BAD_ADDR:
+ return -ENXIO;
+ case SCOM_PIB_TIMEOUT:
+ return -ETIMEDOUT;
+ case SCOM_PIB_PARTIAL:
+ case SCOM_PIB_CLK_ERR:
+ case SCOM_PIB_PARITY_ERR:
+ default:
+ return -EIO;
+ }
+}
+
+static int put_scom(struct scom_device *scom, uint64_t value,
+ uint64_t addr)
+{
+ uint32_t status, dummy = -1;
+ int rc, retries;
+
+ for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+ rc = raw_put_scom(scom, value, addr, &status);
+ if (rc) {
+ /* Try resetting the bridge if FSI fails */
+ if (rc != -ENODEV && retries == 0) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+ &dummy, sizeof(uint32_t));
+ rc = -EBUSY;
+ } else
+ return rc;
+ } else
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc && rc != -EBUSY)
+ break;
+ if (rc == 0) {
+ rc = handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
+ if (rc && rc != -EBUSY)
+ break;
+ }
+ if (rc == 0)
+ break;
+ msleep(1);
+ }
+ return rc;
+}
+
+static int get_scom(struct scom_device *scom, uint64_t *value,
+ uint64_t addr)
+{
+ uint32_t status, dummy = -1;
+ int rc, retries;
+
+ for (retries = 0; retries < SCOM_MAX_RETRIES; retries++) {
+ rc = raw_get_scom(scom, value, addr, &status);
+ if (rc) {
+ /* Try resetting the bridge if FSI fails */
+ if (rc != -ENODEV && retries == 0) {
+ fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG,
+ &dummy, sizeof(uint32_t));
+ rc = -EBUSY;
+ } else
+ return rc;
+ } else
+ rc = handle_fsi2pib_status(scom, status);
+ if (rc && rc != -EBUSY)
+ break;
+ if (rc == 0) {
+ rc = handle_pib_status(scom,
+ (status & SCOM_STATUS_PIB_RESP_MASK)
+ >> SCOM_STATUS_PIB_RESP_SHIFT);
+ if (rc && rc != -EBUSY)
+ break;
+ }
+ if (rc == 0)
+ break;
+ msleep(1);
+ }
+ return rc;
+}
+
static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
- loff_t *offset)
+ loff_t *offset)
{
- int rc;
- struct miscdevice *mdev =
- (struct miscdevice *)filep->private_data;
- struct scom_device *scom = to_scom_dev(mdev);
+ struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
+ int rc;
if (len != sizeof(uint64_t))
return -EINVAL;
- rc = get_scom(scom, &val, *offset);
+ mutex_lock(&scom->lock);
+ if (scom->dead)
+ rc = -ENODEV;
+ else
+ rc = get_scom(scom, &val, *offset);
+ mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "get_scom fail:%d\n", rc);
return rc;
@@ -130,11 +394,10 @@ static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
}
static ssize_t scom_write(struct file *filep, const char __user *buf,
- size_t len, loff_t *offset)
+ size_t len, loff_t *offset)
{
int rc;
- struct miscdevice *mdev = filep->private_data;
- struct scom_device *scom = to_scom_dev(mdev);
+ struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
@@ -147,7 +410,12 @@ static ssize_t scom_write(struct file *filep, const char __user *buf,
return -EINVAL;
}
- rc = put_scom(scom, val, *offset);
+ mutex_lock(&scom->lock);
+ if (scom->dead)
+ rc = -ENODEV;
+ else
+ rc = put_scom(scom, val, *offset);
+ mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "put_scom failed with:%d\n", rc);
return rc;
@@ -171,50 +439,205 @@ static loff_t scom_llseek(struct file *file, loff_t offset, int whence)
return offset;
}
+static void raw_convert_status(struct scom_access *acc, uint32_t status)
+{
+ acc->pib_status = (status & SCOM_STATUS_PIB_RESP_MASK) >>
+ SCOM_STATUS_PIB_RESP_SHIFT;
+ acc->intf_errors = 0;
+
+ if (status & SCOM_STATUS_PROTECTION)
+ acc->intf_errors |= SCOM_INTF_ERR_PROTECTION;
+ else if (status & SCOM_STATUS_PARITY)
+ acc->intf_errors |= SCOM_INTF_ERR_PARITY;
+ else if (status & SCOM_STATUS_PIB_ABORT)
+ acc->intf_errors |= SCOM_INTF_ERR_ABORT;
+ else if (status & SCOM_STATUS_ERR_SUMMARY)
+ acc->intf_errors |= SCOM_INTF_ERR_UNKNOWN;
+}
+
+static int scom_raw_read(struct scom_device *scom, void __user *argp)
+{
+ struct scom_access acc;
+ uint32_t status;
+ int rc;
+
+ if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+ return -EFAULT;
+
+ rc = raw_get_scom(scom, &acc.data, acc.addr, &status);
+ if (rc)
+ return rc;
+ raw_convert_status(&acc, status);
+ if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scom_raw_write(struct scom_device *scom, void __user *argp)
+{
+ u64 prev_data, mask, data;
+ struct scom_access acc;
+ uint32_t status;
+ int rc;
+
+ if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
+ return -EFAULT;
+
+ if (acc.mask) {
+ rc = raw_get_scom(scom, &prev_data, acc.addr, &status);
+ if (rc)
+ return rc;
+ if (status & SCOM_STATUS_ANY_ERR)
+ goto fail;
+ mask = acc.mask;
+ } else {
+ prev_data = mask = -1ull;
+ }
+ data = (prev_data & ~mask) | (acc.data & mask);
+ rc = raw_put_scom(scom, data, acc.addr, &status);
+ if (rc)
+ return rc;
+ fail:
+ raw_convert_status(&acc, status);
+ if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
+ return -EFAULT;
+ return 0;
+}
+
+static int scom_reset(struct scom_device *scom, void __user *argp)
+{
+ uint32_t flags, dummy = -1;
+ int rc = 0;
+
+ if (get_user(flags, (__u32 __user *)argp))
+ return -EFAULT;
+ if (flags & SCOM_RESET_PIB)
+ rc = fsi_device_write(scom->fsi_dev, SCOM_PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ if (!rc && (flags & (SCOM_RESET_PIB | SCOM_RESET_INTF)))
+ rc = fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+ sizeof(uint32_t));
+ return rc;
+}
+
+static int scom_check(struct scom_device *scom, void __user *argp)
+{
+ /* Still need to find out how to get "protected" */
+ return put_user(SCOM_CHECK_SUPPORTED, (__u32 __user *)argp);
+}
+
+static long scom_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct scom_device *scom = file->private_data;
+ void __user *argp = (void __user *)arg;
+ int rc = -ENOTTY;
+
+ mutex_lock(&scom->lock);
+ if (scom->dead) {
+ mutex_unlock(&scom->lock);
+ return -ENODEV;
+ }
+	switch (cmd) {
+ case FSI_SCOM_CHECK:
+ rc = scom_check(scom, argp);
+ break;
+ case FSI_SCOM_READ:
+ rc = scom_raw_read(scom, argp);
+ break;
+ case FSI_SCOM_WRITE:
+ rc = scom_raw_write(scom, argp);
+ break;
+ case FSI_SCOM_RESET:
+ rc = scom_reset(scom, argp);
+ break;
+ }
+ mutex_unlock(&scom->lock);
+ return rc;
+}
+
+static int scom_open(struct inode *inode, struct file *file)
+{
+ struct scom_device *scom = container_of(inode->i_cdev, struct scom_device, cdev);
+
+ file->private_data = scom;
+
+ return 0;
+}
+
static const struct file_operations scom_fops = {
- .owner = THIS_MODULE,
- .llseek = scom_llseek,
- .read = scom_read,
- .write = scom_write,
+ .owner = THIS_MODULE,
+ .open = scom_open,
+ .llseek = scom_llseek,
+ .read = scom_read,
+ .write = scom_write,
+ .unlocked_ioctl = scom_ioctl,
};
+static void scom_free(struct device *dev)
+{
+ struct scom_device *scom = container_of(dev, struct scom_device, dev);
+
+ put_device(&scom->fsi_dev->dev);
+ kfree(scom);
+}
+
static int scom_probe(struct device *dev)
{
- uint32_t data;
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct scom_device *scom;
+ int rc, didx;
- scom = devm_kzalloc(dev, sizeof(*scom), GFP_KERNEL);
+ scom = kzalloc(sizeof(*scom), GFP_KERNEL);
if (!scom)
return -ENOMEM;
+ dev_set_drvdata(dev, scom);
+ mutex_init(&scom->lock);
- scom->idx = ida_simple_get(&scom_ida, 1, INT_MAX, GFP_KERNEL);
- snprintf(scom->name, sizeof(scom->name), "scom%d", scom->idx);
+	/* Grab a reference to the device (parent of our cdev); dropped in scom_free() */
+ if (!get_device(dev)) {
+ kfree(scom);
+ return -ENODEV;
+ }
scom->fsi_dev = fsi_dev;
- scom->mdev.minor = MISC_DYNAMIC_MINOR;
- scom->mdev.fops = &scom_fops;
- scom->mdev.name = scom->name;
- scom->mdev.parent = dev;
- list_add(&scom->link, &scom_devices);
- data = cpu_to_be32(SCOM_RESET_CMD);
- fsi_device_write(fsi_dev, SCOM_RESET_REG, &data, sizeof(uint32_t));
+ /* Create chardev for userspace access */
+ scom->dev.type = &fsi_cdev_type;
+ scom->dev.parent = dev;
+ scom->dev.release = scom_free;
+ device_initialize(&scom->dev);
+
+ /* Allocate a minor in the FSI space */
+ rc = fsi_get_new_minor(fsi_dev, fsi_dev_scom, &scom->dev.devt, &didx);
+ if (rc)
+ goto err;
+
+ dev_set_name(&scom->dev, "scom%d", didx);
+ cdev_init(&scom->cdev, &scom_fops);
+ rc = cdev_device_add(&scom->cdev, &scom->dev);
+ if (rc) {
+ dev_err(dev, "Error %d creating char device %s\n",
+ rc, dev_name(&scom->dev));
+ goto err_free_minor;
+ }
- return misc_register(&scom->mdev);
+ return 0;
+ err_free_minor:
+ fsi_free_minor(scom->dev.devt);
+ err:
+ put_device(&scom->dev);
+ return rc;
}
static int scom_remove(struct device *dev)
{
- struct scom_device *scom, *scom_tmp;
- struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct scom_device *scom = dev_get_drvdata(dev);
- list_for_each_entry_safe(scom, scom_tmp, &scom_devices, link) {
- if (scom->fsi_dev == fsi_dev) {
- list_del(&scom->link);
- ida_simple_remove(&scom_ida, scom->idx);
- misc_deregister(&scom->mdev);
- }
- }
+ mutex_lock(&scom->lock);
+ scom->dead = true;
+ mutex_unlock(&scom->lock);
+ cdev_device_del(&scom->cdev, &scom->dev);
+ fsi_free_minor(scom->dev.devt);
+ put_device(&scom->dev);
return 0;
}
@@ -239,20 +662,11 @@ static struct fsi_driver scom_drv = {
static int scom_init(void)
{
- INIT_LIST_HEAD(&scom_devices);
return fsi_driver_register(&scom_drv);
}
static void scom_exit(void)
{
- struct list_head *pos;
- struct scom_device *scom;
-
- list_for_each(pos, &scom_devices) {
- scom = list_entry(pos, struct scom_device, link);
- misc_deregister(&scom->mdev);
- devm_kfree(&scom->fsi_dev->dev, scom);
- }
fsi_driver_unregister(&scom_drv);
}
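For illustration, a hedged userspace sketch of the ioctl interface added above: the rewritten driver exposes each SCOM engine as a character device ("scom%d") and reports PIB/interface status through struct scom_access instead of only through the syscall return value. The ioctl numbers and struct layout come from the new <uapi/linux/fsi.h> header, which is not part of this hunk, so the field names below simply follow their use in scom_raw_read(); the device path and the 0xf000f example address are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsi.h>

int main(void)
{
	struct scom_access acc = { .addr = 0xf000f };	/* hypothetical address */
	int fd = open("/dev/scom1", O_RDWR);

	if (fd < 0)
		return 1;

	/*
	 * On success the SCOM result is in acc.data; PIB and interface
	 * errors are reported via acc.pib_status / acc.intf_errors,
	 * not via the ioctl return value.
	 */
	if (ioctl(fd, FSI_SCOM_READ, &acc) == 0)
		printf("data=%016llx pib=%x intf=%x\n",
		       (unsigned long long)acc.data,
		       acc.pib_status, acc.intf_errors);

	close(fd);
	return 0;
}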
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
new file mode 100644
index 000000000000..6abc88514512
--- /dev/null
+++ b/drivers/gnss/Kconfig
@@ -0,0 +1,43 @@
+#
+# GNSS receiver configuration
+#
+
+menuconfig GNSS
+ tristate "GNSS receiver support"
+ ---help---
+ Say Y here if you have a GNSS receiver (e.g. a GPS receiver).
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss.
+
+if GNSS
+
+config GNSS_SERIAL
+ tristate
+
+config GNSS_SIRF_SERIAL
+ tristate "SiRFstar GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ ---help---
+ Say Y here if you have a SiRFstar-based GNSS receiver which uses a
+ serial interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-sirf.
+
+ If unsure, say N.
+
+config GNSS_UBX_SERIAL
+ tristate "u-blox GNSS receiver support"
+ depends on SERIAL_DEV_BUS
+ select GNSS_SERIAL
+ ---help---
+ Say Y here if you have a u-blox GNSS receiver which uses a serial
+ interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-ubx.
+
+ If unsure, say N.
+
+endif # GNSS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
new file mode 100644
index 000000000000..5cf0ebe0330a
--- /dev/null
+++ b/drivers/gnss/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the GNSS subsystem.
+#
+
+obj-$(CONFIG_GNSS) += gnss.o
+gnss-y := core.o
+
+obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o
+gnss-serial-y := serial.o
+
+obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o
+gnss-sirf-y := sirf.o
+
+obj-$(CONFIG_GNSS_UBX_SERIAL) += gnss-ubx.o
+gnss-ubx-y := ubx.o
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
new file mode 100644
index 000000000000..4291a0dd22aa
--- /dev/null
+++ b/drivers/gnss/core.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GNSS receiver core
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cdev.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gnss.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#define GNSS_FLAG_HAS_WRITE_RAW BIT(0)
+
+#define GNSS_MINORS 16
+
+static DEFINE_IDA(gnss_minors);
+static dev_t gnss_first;
+
+/* FIFO size must be a power of two */
+#define GNSS_READ_FIFO_SIZE 4096
+#define GNSS_WRITE_BUF_SIZE 1024
+
+#define to_gnss_device(d) container_of((d), struct gnss_device, dev)
+
+static int gnss_open(struct inode *inode, struct file *file)
+{
+ struct gnss_device *gdev;
+ int ret = 0;
+
+ gdev = container_of(inode->i_cdev, struct gnss_device, cdev);
+
+ get_device(&gdev->dev);
+
+ nonseekable_open(inode, file);
+ file->private_data = gdev;
+
+ down_write(&gdev->rwsem);
+ if (gdev->disconnected) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (gdev->count++ == 0) {
+ ret = gdev->ops->open(gdev);
+ if (ret)
+ gdev->count--;
+ }
+unlock:
+ up_write(&gdev->rwsem);
+
+ if (ret)
+ put_device(&gdev->dev);
+
+ return ret;
+}
+
+static int gnss_release(struct inode *inode, struct file *file)
+{
+ struct gnss_device *gdev = file->private_data;
+
+ down_write(&gdev->rwsem);
+ if (gdev->disconnected)
+ goto unlock;
+
+ if (--gdev->count == 0) {
+ gdev->ops->close(gdev);
+ kfifo_reset(&gdev->read_fifo);
+ }
+unlock:
+ up_write(&gdev->rwsem);
+
+ put_device(&gdev->dev);
+
+ return 0;
+}
+
+static ssize_t gnss_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct gnss_device *gdev = file->private_data;
+ unsigned int copied;
+ int ret;
+
+ mutex_lock(&gdev->read_mutex);
+ while (kfifo_is_empty(&gdev->read_fifo)) {
+ mutex_unlock(&gdev->read_mutex);
+
+ if (gdev->disconnected)
+ return 0;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(gdev->read_queue,
+ gdev->disconnected ||
+ !kfifo_is_empty(&gdev->read_fifo));
+ if (ret)
+ return -ERESTARTSYS;
+
+ mutex_lock(&gdev->read_mutex);
+ }
+
+ ret = kfifo_to_user(&gdev->read_fifo, buf, count, &copied);
+ if (ret == 0)
+ ret = copied;
+
+ mutex_unlock(&gdev->read_mutex);
+
+ return ret;
+}
+
+static ssize_t gnss_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct gnss_device *gdev = file->private_data;
+ size_t written = 0;
+ int ret;
+
+ if (gdev->disconnected)
+ return -EIO;
+
+ if (!count)
+ return 0;
+
+ if (!(gdev->flags & GNSS_FLAG_HAS_WRITE_RAW))
+ return -EIO;
+
+ /* Ignoring O_NONBLOCK, write_raw() is synchronous. */
+
+ ret = mutex_lock_interruptible(&gdev->write_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ for (;;) {
+ size_t n = count - written;
+
+ if (n > GNSS_WRITE_BUF_SIZE)
+ n = GNSS_WRITE_BUF_SIZE;
+
+ if (copy_from_user(gdev->write_buf, buf, n)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ /*
+ * Assumes write_raw can always accept GNSS_WRITE_BUF_SIZE
+ * bytes.
+ *
+ * FIXME: revisit
+ */
+ down_read(&gdev->rwsem);
+ if (!gdev->disconnected)
+ ret = gdev->ops->write_raw(gdev, gdev->write_buf, n);
+ else
+ ret = -EIO;
+ up_read(&gdev->rwsem);
+
+ if (ret < 0)
+ break;
+
+ written += ret;
+ buf += ret;
+
+ if (written == count)
+ break;
+ }
+
+ if (written)
+ ret = written;
+out_unlock:
+ mutex_unlock(&gdev->write_mutex);
+
+ return ret;
+}
+
+static __poll_t gnss_poll(struct file *file, poll_table *wait)
+{
+ struct gnss_device *gdev = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &gdev->read_queue, wait);
+
+ if (!kfifo_is_empty(&gdev->read_fifo))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (gdev->disconnected)
+ mask |= EPOLLHUP;
+
+ return mask;
+}
+
+static const struct file_operations gnss_fops = {
+ .owner = THIS_MODULE,
+ .open = gnss_open,
+ .release = gnss_release,
+ .read = gnss_read,
+ .write = gnss_write,
+ .poll = gnss_poll,
+ .llseek = no_llseek,
+};
+
+static struct class *gnss_class;
+
+static void gnss_device_release(struct device *dev)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+
+ kfree(gdev->write_buf);
+ kfifo_free(&gdev->read_fifo);
+ ida_simple_remove(&gnss_minors, gdev->id);
+ kfree(gdev);
+}
+
+struct gnss_device *gnss_allocate_device(struct device *parent)
+{
+ struct gnss_device *gdev;
+ struct device *dev;
+ int id;
+ int ret;
+
+ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
+ return NULL;
+
+ id = ida_simple_get(&gnss_minors, 0, GNSS_MINORS, GFP_KERNEL);
+ if (id < 0) {
+ kfree(gdev);
+ return NULL;
+ }
+
+ gdev->id = id;
+
+ dev = &gdev->dev;
+ device_initialize(dev);
+ dev->devt = gnss_first + id;
+ dev->class = gnss_class;
+ dev->parent = parent;
+ dev->release = gnss_device_release;
+ dev_set_drvdata(dev, gdev);
+ dev_set_name(dev, "gnss%d", id);
+
+ init_rwsem(&gdev->rwsem);
+ mutex_init(&gdev->read_mutex);
+ mutex_init(&gdev->write_mutex);
+ init_waitqueue_head(&gdev->read_queue);
+
+ ret = kfifo_alloc(&gdev->read_fifo, GNSS_READ_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ goto err_put_device;
+
+ gdev->write_buf = kzalloc(GNSS_WRITE_BUF_SIZE, GFP_KERNEL);
+ if (!gdev->write_buf)
+ goto err_put_device;
+
+ cdev_init(&gdev->cdev, &gnss_fops);
+ gdev->cdev.owner = THIS_MODULE;
+
+ return gdev;
+
+err_put_device:
+ put_device(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gnss_allocate_device);
+
+void gnss_put_device(struct gnss_device *gdev)
+{
+ put_device(&gdev->dev);
+}
+EXPORT_SYMBOL_GPL(gnss_put_device);
+
+int gnss_register_device(struct gnss_device *gdev)
+{
+ int ret;
+
+ /* Set a flag which can be accessed without holding the rwsem. */
+ if (gdev->ops->write_raw != NULL)
+ gdev->flags |= GNSS_FLAG_HAS_WRITE_RAW;
+
+ ret = cdev_device_add(&gdev->cdev, &gdev->dev);
+ if (ret) {
+ dev_err(&gdev->dev, "failed to add device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnss_register_device);
+
+void gnss_deregister_device(struct gnss_device *gdev)
+{
+ down_write(&gdev->rwsem);
+ gdev->disconnected = true;
+ if (gdev->count) {
+ wake_up_interruptible(&gdev->read_queue);
+ gdev->ops->close(gdev);
+ }
+ up_write(&gdev->rwsem);
+
+ cdev_device_del(&gdev->cdev, &gdev->dev);
+}
+EXPORT_SYMBOL_GPL(gnss_deregister_device);
+
+/*
+ * Caller guarantees serialisation.
+ *
+ * Must not be called for a closed device.
+ */
+int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ int ret;
+
+ ret = kfifo_in(&gdev->read_fifo, buf, count);
+
+ wake_up_interruptible(&gdev->read_queue);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnss_insert_raw);
+
+static const char * const gnss_type_names[GNSS_TYPE_COUNT] = {
+ [GNSS_TYPE_NMEA] = "NMEA",
+ [GNSS_TYPE_SIRF] = "SiRF",
+ [GNSS_TYPE_UBX] = "UBX",
+};
+
+static const char *gnss_type_name(struct gnss_device *gdev)
+{
+ const char *name = NULL;
+
+ if (gdev->type < GNSS_TYPE_COUNT)
+ name = gnss_type_names[gdev->type];
+
+ if (!name)
+ dev_WARN(&gdev->dev, "type name not defined\n");
+
+ return name;
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+
+ return sprintf(buf, "%s\n", gnss_type_name(gdev));
+}
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *gnss_attrs[] = {
+ &dev_attr_type.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gnss);
+
+static int gnss_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gnss_device *gdev = to_gnss_device(dev);
+ int ret;
+
+ ret = add_uevent_var(env, "GNSS_TYPE=%s", gnss_type_name(gdev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __init gnss_module_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&gnss_first, 0, GNSS_MINORS, "gnss");
+ if (ret < 0) {
+ pr_err("failed to allocate device numbers: %d\n", ret);
+ return ret;
+ }
+
+ gnss_class = class_create(THIS_MODULE, "gnss");
+ if (IS_ERR(gnss_class)) {
+ ret = PTR_ERR(gnss_class);
+ pr_err("failed to create class: %d\n", ret);
+ goto err_unregister_chrdev;
+ }
+
+ gnss_class->dev_groups = gnss_groups;
+ gnss_class->dev_uevent = gnss_uevent;
+
+ pr_info("GNSS driver registered with major %d\n", MAJOR(gnss_first));
+
+ return 0;
+
+err_unregister_chrdev:
+ unregister_chrdev_region(gnss_first, GNSS_MINORS);
+
+ return ret;
+}
+module_init(gnss_module_init);
+
+static void __exit gnss_module_exit(void)
+{
+ class_destroy(gnss_class);
+ unregister_chrdev_region(gnss_first, GNSS_MINORS);
+ ida_destroy(&gnss_minors);
+}
+module_exit(gnss_module_exit);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("GNSS receiver core");
+MODULE_LICENSE("GPL v2");
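A hedged usage sketch for the character device registered by the core: read() drains the kfifo filled by gnss_insert_raw() and blocks while it is empty, returning 0 once the receiver has been disconnected. The node name follows the "gnss%d" naming above; everything else is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/gnss0", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Blocks until data arrives; returns 0 on disconnect. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}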
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
new file mode 100644
index 000000000000..b01ba4438501
--- /dev/null
+++ b/drivers/gnss/serial.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic serial GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+
+#include "serial.h"
+
+static int gnss_serial_open(struct gnss_device *gdev)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ ret = serdev_device_open(serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, gserial->speed);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = pm_runtime_get_sync(&serdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&serdev->dev);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ serdev_device_close(serdev);
+
+ return ret;
+}
+
+static void gnss_serial_close(struct gnss_device *gdev)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+
+ serdev_device_close(serdev);
+
+ pm_runtime_put(&serdev->dev);
+}
+
+static int gnss_serial_write_raw(struct gnss_device *gdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_serial *gserial = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ /* write is only buffered synchronously */
+ ret = serdev_device_write(serdev, buf, count, 0);
+ if (ret < 0)
+ return ret;
+
+ /* FIXME: determine if interrupted? */
+ serdev_device_wait_until_sent(serdev, 0);
+
+ return count;
+}
+
+static const struct gnss_operations gnss_serial_gnss_ops = {
+ .open = gnss_serial_open,
+ .close = gnss_serial_close,
+ .write_raw = gnss_serial_write_raw,
+};
+
+static int gnss_serial_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct gnss_device *gdev = gserial->gdev;
+
+ return gnss_insert_raw(gdev, buf, count);
+}
+
+static const struct serdev_device_ops gnss_serial_serdev_ops = {
+ .receive_buf = gnss_serial_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int gnss_serial_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ if (!gserial->ops || !gserial->ops->set_power)
+ return 0;
+
+ return gserial->ops->set_power(gserial, state);
+}
+
+/*
+ * FIXME: need to provide subdriver defaults or separate dt parsing from
+ * allocation.
+ */
+static int gnss_serial_parse_dt(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct device_node *node = serdev->dev.of_node;
+ u32 speed = 4800;
+
+ of_property_read_u32(node, "current-speed", &speed);
+
+ gserial->speed = speed;
+
+ return 0;
+}
+
+struct gnss_serial *gnss_serial_allocate(struct serdev_device *serdev,
+ size_t data_size)
+{
+ struct gnss_serial *gserial;
+ struct gnss_device *gdev;
+ int ret;
+
+ gserial = kzalloc(sizeof(*gserial) + data_size, GFP_KERNEL);
+ if (!gserial)
+ return ERR_PTR(-ENOMEM);
+
+ gdev = gnss_allocate_device(&serdev->dev);
+ if (!gdev) {
+ ret = -ENOMEM;
+ goto err_free_gserial;
+ }
+
+ gdev->ops = &gnss_serial_gnss_ops;
+ gnss_set_drvdata(gdev, gserial);
+
+ gserial->serdev = serdev;
+ gserial->gdev = gdev;
+
+ serdev_device_set_drvdata(serdev, gserial);
+ serdev_device_set_client_ops(serdev, &gnss_serial_serdev_ops);
+
+ ret = gnss_serial_parse_dt(serdev);
+ if (ret)
+ goto err_put_device;
+
+ return gserial;
+
+err_put_device:
+ gnss_put_device(gserial->gdev);
+err_free_gserial:
+ kfree(gserial);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_allocate);
+
+void gnss_serial_free(struct gnss_serial *gserial)
+{
+ gnss_put_device(gserial->gdev);
+ kfree(gserial);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_free);
+
+int gnss_serial_register(struct gnss_serial *gserial)
+{
+ struct serdev_device *serdev = gserial->serdev;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_enable(&serdev->dev);
+ } else {
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = gnss_register_device(gserial->gdev);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gnss_serial_register);
+
+void gnss_serial_deregister(struct gnss_serial *gserial)
+{
+ struct serdev_device *serdev = gserial->serdev;
+
+ gnss_deregister_device(gserial->gdev);
+
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ gnss_serial_set_power(gserial, GNSS_SERIAL_OFF);
+}
+EXPORT_SYMBOL_GPL(gnss_serial_deregister);
+
+#ifdef CONFIG_PM
+static int gnss_serial_runtime_suspend(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+
+ return gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
+}
+
+static int gnss_serial_runtime_resume(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+
+ return gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+}
+#endif /* CONFIG_PM */
+
+static int gnss_serial_prepare(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gnss_serial_suspend(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+ int ret = 0;
+
+ /*
+ * FIXME: serdev currently lacks support for managing the underlying
+ * device's wakeup settings. A workaround would be to close the serdev
+ * device here if it is open.
+ */
+
+ if (!pm_runtime_suspended(dev))
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_STANDBY);
+
+ return ret;
+}
+
+static int gnss_serial_resume(struct device *dev)
+{
+ struct gnss_serial *gserial = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = gnss_serial_set_power(gserial, GNSS_SERIAL_ACTIVE);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops gnss_serial_pm_ops = {
+ .prepare = gnss_serial_prepare,
+ SET_SYSTEM_SLEEP_PM_OPS(gnss_serial_suspend, gnss_serial_resume)
+ SET_RUNTIME_PM_OPS(gnss_serial_runtime_suspend, gnss_serial_runtime_resume, NULL)
+};
+EXPORT_SYMBOL_GPL(gnss_serial_pm_ops);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("Generic serial GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/serial.h b/drivers/gnss/serial.h
new file mode 100644
index 000000000000..980ffdc86c2a
--- /dev/null
+++ b/drivers/gnss/serial.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generic serial GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#ifndef _LINUX_GNSS_SERIAL_H
+#define _LINUX_GNSS_SERIAL_H
+
+#include <asm/termbits.h>
+#include <linux/pm.h>
+
+struct gnss_serial {
+ struct serdev_device *serdev;
+ struct gnss_device *gdev;
+ speed_t speed;
+ const struct gnss_serial_ops *ops;
+ unsigned long drvdata[0];
+};
+
+enum gnss_serial_pm_state {
+ GNSS_SERIAL_OFF,
+ GNSS_SERIAL_ACTIVE,
+ GNSS_SERIAL_STANDBY,
+};
+
+struct gnss_serial_ops {
+ int (*set_power)(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state);
+};
+
+extern const struct dev_pm_ops gnss_serial_pm_ops;
+
+struct gnss_serial *gnss_serial_allocate(struct serdev_device *gserial,
+ size_t data_size);
+void gnss_serial_free(struct gnss_serial *gserial);
+
+int gnss_serial_register(struct gnss_serial *gserial);
+void gnss_serial_deregister(struct gnss_serial *gserial);
+
+static inline void *gnss_serial_get_drvdata(struct gnss_serial *gserial)
+{
+ return gserial->drvdata;
+}
+
+#endif /* _LINUX_GNSS_SERIAL_H */
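To make the glue layer concrete, here is a hedged skeleton (all mychip_* names are hypothetical) of a serdev subdriver built on the helpers declared above; the u-blox driver further below is the complete in-tree example. Private state is carried in the trailing allocation and retrieved with gnss_serial_get_drvdata().

#include <linux/err.h>
#include <linux/gnss.h>
#include <linux/serdev.h>

#include "serial.h"

struct mychip_data {
	int state;	/* hypothetical private state */
};

static int mychip_probe(struct serdev_device *serdev)
{
	struct gnss_serial *gserial;
	int ret;

	/* The trailing allocation holds struct mychip_data. */
	gserial = gnss_serial_allocate(serdev, sizeof(struct mychip_data));
	if (IS_ERR(gserial))
		return PTR_ERR(gserial);

	gserial->gdev->type = GNSS_TYPE_NMEA;
	/* gserial->ops could point at set_power callbacks here. */

	ret = gnss_serial_register(gserial);
	if (ret) {
		gnss_serial_free(gserial);
		return ret;
	}

	return 0;
}

static void mychip_remove(struct serdev_device *serdev)
{
	struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);

	gnss_serial_deregister(gserial);
	gnss_serial_free(gserial);
}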
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
new file mode 100644
index 000000000000..79cb98950013
--- /dev/null
+++ b/drivers/gnss/sirf.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiRFstar GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#define SIRF_BOOT_DELAY 500
+#define SIRF_ON_OFF_PULSE_TIME 100
+#define SIRF_ACTIVATE_TIMEOUT 200
+#define SIRF_HIBERNATE_TIMEOUT 200
+
+struct sirf_data {
+ struct gnss_device *gdev;
+ struct serdev_device *serdev;
+ speed_t speed;
+ struct regulator *vcc;
+ struct gpio_desc *on_off;
+ struct gpio_desc *wakeup;
+ int irq;
+ bool active;
+ wait_queue_head_t power_wait;
+};
+
+static int sirf_open(struct gnss_device *gdev)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+ int ret;
+
+ ret = serdev_device_open(serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, data->speed);
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = pm_runtime_get_sync(&serdev->dev);
+ if (ret < 0) {
+ dev_err(&gdev->dev, "failed to runtime resume: %d\n", ret);
+ pm_runtime_put_noidle(&serdev->dev);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ serdev_device_close(serdev);
+
+ return ret;
+}
+
+static void sirf_close(struct gnss_device *gdev)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+
+ serdev_device_close(serdev);
+
+ pm_runtime_put(&serdev->dev);
+}
+
+static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ struct sirf_data *data = gnss_get_drvdata(gdev);
+ struct serdev_device *serdev = data->serdev;
+ int ret;
+
+ /* write is only buffered synchronously */
+ ret = serdev_device_write(serdev, buf, count, 0);
+ if (ret < 0)
+ return ret;
+
+ /* FIXME: determine if interrupted? */
+ serdev_device_wait_until_sent(serdev, 0);
+
+ return count;
+}
+
+static const struct gnss_operations sirf_gnss_ops = {
+ .open = sirf_open,
+ .close = sirf_close,
+ .write_raw = sirf_write_raw,
+};
+
+static int sirf_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+ struct gnss_device *gdev = data->gdev;
+
+ return gnss_insert_raw(gdev, buf, count);
+}
+
+static const struct serdev_device_ops sirf_serdev_ops = {
+ .receive_buf = sirf_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id)
+{
+ struct sirf_data *data = dev_id;
+ struct device *dev = &data->serdev->dev;
+ int ret;
+
+ ret = gpiod_get_value_cansleep(data->wakeup);
+ dev_dbg(dev, "%s - wakeup = %d\n", __func__, ret);
+ if (ret < 0)
+ goto out;
+
+ data->active = !!ret;
+ wake_up_interruptible(&data->power_wait);
+out:
+ return IRQ_HANDLED;
+}
+
+static int sirf_wait_for_power_state(struct sirf_data *data, bool active,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_interruptible_timeout(data->power_wait,
+ data->active == active, msecs_to_jiffies(timeout));
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0) {
+ dev_warn(&data->serdev->dev, "timeout waiting for active state = %d\n",
+ active);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sirf_pulse_on_off(struct sirf_data *data)
+{
+ gpiod_set_value_cansleep(data->on_off, 1);
+ msleep(SIRF_ON_OFF_PULSE_TIME);
+ gpiod_set_value_cansleep(data->on_off, 0);
+}
+
+static int sirf_set_active(struct sirf_data *data, bool active)
+{
+ unsigned long timeout;
+ int retries = 3;
+ int ret;
+
+ if (active)
+ timeout = SIRF_ACTIVATE_TIMEOUT;
+ else
+ timeout = SIRF_HIBERNATE_TIMEOUT;
+
+ while (retries-- > 0) {
+ sirf_pulse_on_off(data);
+ ret = sirf_wait_for_power_state(data, active, timeout);
+ if (ret < 0) {
+ if (ret == -ETIMEDOUT)
+ continue;
+
+ return ret;
+ }
+
+ break;
+ }
+
+	if (retries < 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int sirf_runtime_suspend(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+
+ if (!data->on_off)
+ return regulator_disable(data->vcc);
+
+ return sirf_set_active(data, false);
+}
+
+static int sirf_runtime_resume(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+
+ if (!data->on_off)
+ return regulator_enable(data->vcc);
+
+ return sirf_set_active(data, true);
+}
+
+static int __maybe_unused sirf_suspend(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev))
+ ret = sirf_runtime_suspend(dev);
+
+ if (data->wakeup)
+ disable_irq(data->irq);
+
+ return ret;
+}
+
+static int __maybe_unused sirf_resume(struct device *dev)
+{
+ struct sirf_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (data->wakeup)
+ enable_irq(data->irq);
+
+ if (!pm_runtime_suspended(dev))
+ ret = sirf_runtime_resume(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sirf_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sirf_suspend, sirf_resume)
+ SET_RUNTIME_PM_OPS(sirf_runtime_suspend, sirf_runtime_resume, NULL)
+};
+
+static int sirf_parse_dt(struct serdev_device *serdev)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+ struct device_node *node = serdev->dev.of_node;
+ u32 speed = 9600;
+
+ of_property_read_u32(node, "current-speed", &speed);
+
+ data->speed = speed;
+
+ return 0;
+}
+
+static int sirf_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct gnss_device *gdev;
+ struct sirf_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ gdev = gnss_allocate_device(dev);
+ if (!gdev)
+ return -ENOMEM;
+
+ gdev->type = GNSS_TYPE_SIRF;
+ gdev->ops = &sirf_gnss_ops;
+ gnss_set_drvdata(gdev, data);
+
+ data->serdev = serdev;
+ data->gdev = gdev;
+
+ init_waitqueue_head(&data->power_wait);
+
+ serdev_device_set_drvdata(serdev, data);
+ serdev_device_set_client_ops(serdev, &sirf_serdev_ops);
+
+ ret = sirf_parse_dt(serdev);
+ if (ret)
+ goto err_put_device;
+
+ data->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_put_device;
+ }
+
+ data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(data->on_off))
+ goto err_put_device;
+
+ if (data->on_off) {
+ data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
+ GPIOD_IN);
+ if (IS_ERR(data->wakeup))
+ goto err_put_device;
+
+ /*
+		 * Configurations where WAKEUP has been left not connected
+		 * are currently not supported.
+ */
+ if (!data->wakeup) {
+ dev_err(dev, "no wakeup gpio specified\n");
+ ret = -ENODEV;
+ goto err_put_device;
+ }
+ }
+
+ if (data->wakeup) {
+ ret = gpiod_to_irq(data->wakeup);
+ if (ret < 0)
+ goto err_put_device;
+
+ data->irq = ret;
+
+ ret = devm_request_threaded_irq(dev, data->irq, NULL,
+ sirf_wakeup_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "wakeup", data);
+ if (ret)
+ goto err_put_device;
+ }
+
+ if (data->on_off) {
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ goto err_put_device;
+
+ /* Wait for chip to boot into hibernate mode */
+ msleep(SIRF_BOOT_DELAY);
+ }
+
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_set_suspended(dev); /* clear runtime_error flag */
+ pm_runtime_enable(dev);
+ } else {
+ ret = sirf_runtime_resume(dev);
+ if (ret < 0)
+ goto err_disable_vcc;
+ }
+
+ ret = gnss_register_device(gdev);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(dev);
+ else
+ sirf_runtime_suspend(dev);
+err_disable_vcc:
+ if (data->on_off)
+ regulator_disable(data->vcc);
+err_put_device:
+ gnss_put_device(data->gdev);
+
+ return ret;
+}
+
+static void sirf_remove(struct serdev_device *serdev)
+{
+ struct sirf_data *data = serdev_device_get_drvdata(serdev);
+
+ gnss_deregister_device(data->gdev);
+
+ if (IS_ENABLED(CONFIG_PM))
+ pm_runtime_disable(&serdev->dev);
+ else
+ sirf_runtime_suspend(&serdev->dev);
+
+ if (data->on_off)
+ regulator_disable(data->vcc);
+
+ gnss_put_device(data->gdev);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sirf_of_match[] = {
+ { .compatible = "fastrax,uc430" },
+ { .compatible = "linx,r4" },
+ { .compatible = "wi2wi,w2sg0008i" },
+ { .compatible = "wi2wi,w2sg0084i" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sirf_of_match);
+#endif
+
+static struct serdev_device_driver sirf_driver = {
+ .driver = {
+ .name = "gnss-sirf",
+ .of_match_table = of_match_ptr(sirf_of_match),
+ .pm = &sirf_pm_ops,
+ },
+ .probe = sirf_probe,
+ .remove = sirf_remove,
+};
+module_serdev_device_driver(sirf_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("SiRFstar GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
new file mode 100644
index 000000000000..12568aebb7f6
--- /dev/null
+++ b/drivers/gnss/ubx.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * u-blox GNSS receiver driver
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+
+#include "serial.h"
+
+struct ubx_data {
+ struct regulator *v_bckp;
+ struct regulator *vcc;
+};
+
+static int ubx_set_active(struct gnss_serial *gserial)
+{
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ubx_set_standby(struct gnss_serial *gserial)
+{
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+ int ret;
+
+ ret = regulator_disable(data->vcc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ubx_set_power(struct gnss_serial *gserial,
+ enum gnss_serial_pm_state state)
+{
+ switch (state) {
+ case GNSS_SERIAL_ACTIVE:
+ return ubx_set_active(gserial);
+ case GNSS_SERIAL_OFF:
+ case GNSS_SERIAL_STANDBY:
+ return ubx_set_standby(gserial);
+ }
+
+ return -EINVAL;
+}
+
+static const struct gnss_serial_ops ubx_gserial_ops = {
+ .set_power = ubx_set_power,
+};
+
+static int ubx_probe(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial;
+ struct ubx_data *data;
+ int ret;
+
+ gserial = gnss_serial_allocate(serdev, sizeof(*data));
+ if (IS_ERR(gserial)) {
+ ret = PTR_ERR(gserial);
+ return ret;
+ }
+
+ gserial->ops = &ubx_gserial_ops;
+
+ gserial->gdev->type = GNSS_TYPE_UBX;
+
+ data = gnss_serial_get_drvdata(gserial);
+
+ data->vcc = devm_regulator_get(&serdev->dev, "vcc");
+ if (IS_ERR(data->vcc)) {
+ ret = PTR_ERR(data->vcc);
+ goto err_free_gserial;
+ }
+
+ data->v_bckp = devm_regulator_get_optional(&serdev->dev, "v-bckp");
+ if (IS_ERR(data->v_bckp)) {
+ ret = PTR_ERR(data->v_bckp);
+ if (ret == -ENODEV)
+ data->v_bckp = NULL;
+ else
+ goto err_free_gserial;
+ }
+
+ if (data->v_bckp) {
+ ret = regulator_enable(data->v_bckp);
+ if (ret)
+ goto err_free_gserial;
+ }
+
+ ret = gnss_serial_register(gserial);
+ if (ret)
+ goto err_disable_v_bckp;
+
+ return 0;
+
+err_disable_v_bckp:
+ if (data->v_bckp)
+ regulator_disable(data->v_bckp);
+err_free_gserial:
+ gnss_serial_free(gserial);
+
+ return ret;
+}
+
+static void ubx_remove(struct serdev_device *serdev)
+{
+ struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
+ struct ubx_data *data = gnss_serial_get_drvdata(gserial);
+
+ gnss_serial_deregister(gserial);
+ if (data->v_bckp)
+ regulator_disable(data->v_bckp);
+ gnss_serial_free(gserial);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ubx_of_match[] = {
+ { .compatible = "u-blox,neo-8" },
+ { .compatible = "u-blox,neo-m8" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ubx_of_match);
+#endif
+
+static struct serdev_device_driver ubx_driver = {
+ .driver = {
+ .name = "gnss-ubx",
+ .of_match_table = of_match_ptr(ubx_of_match),
+ .pm = &gnss_serial_pm_ops,
+ },
+ .probe = ubx_probe,
+ .remove = ubx_remove,
+};
+module_serdev_device_driver(ubx_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("u-blox GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
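With the core and the serdev drivers above in place, a receiver appears as /dev/gnssN. A final hedged sketch shows the poll() semantics implemented by gnss_poll(): EPOLLIN/EPOLLRDNORM when the read fifo has data, EPOLLHUP once the underlying device is gone.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLIN };
	char buf[512];
	ssize_t n;

	pfd.fd = open("/dev/gnss0", O_RDONLY | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;

	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLHUP)
			break;	/* receiver was unregistered */
		while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
	}

	close(pfd.fd);
	return 0;
}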
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index 7a3eb8c17ef9..5ce84d0dbf81 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "mtk_cec.h"
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index b5e071a49045..88eb268fdf73 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3aa2bb9f0f81..b372854cf38d 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -598,6 +598,9 @@ static struct hv_driver mousevsc_drv = {
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init mousevsc_init(void)
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 7765de2f1ef1..2ada82d2ec8c 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -20,6 +20,7 @@
* 02110-1301 USA
*/
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ba0a092ae085..741857d80da1 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -29,12 +29,26 @@
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
+#include <asm/page.h>
#include "hyperv_vmbus.h"
#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
+static unsigned long virt_to_hvpfn(void *addr)
+{
+ unsigned long paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> PAGE_SHIFT;
+}
+
/*
* vmbus_setevent- Trigger an event notification on the specified
* channel.
@@ -298,8 +312,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
+ gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * i);
*msginfo = msgheader;
pfnsum = pfncount;
@@ -350,9 +364,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
* so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * (pfnsum + i)) >>
- PAGE_SHIFT;
+ gpadl_body->pfn[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * (pfnsum + i));
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -380,8 +393,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range[0].byte_offset = 0;
gpadl_header->range[0].byte_count = size;
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
- kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
+ gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
+ kbuffer + PAGE_SIZE * i);
*msginfo = msgheader;
}
@@ -558,11 +571,8 @@ static void reset_channel_cb(void *arg)
channel->onchannel_callback = NULL;
}
-static int vmbus_close_internal(struct vmbus_channel *channel)
+void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
- struct vmbus_channel_close_channel *msg;
- int ret;
-
/*
* vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
@@ -572,6 +582,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
*/
tasklet_disable(&channel->callback_event);
+ channel->sc_creation_callback = NULL;
+
+ /* Stop the callback asap */
+ if (channel->target_cpu != get_cpu()) {
+ put_cpu();
+ smp_call_function_single(channel->target_cpu, reset_channel_cb,
+ channel, true);
+ } else {
+ reset_channel_cb(channel);
+ put_cpu();
+ }
+
+ /* Re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
+}
+
+static int vmbus_close_internal(struct vmbus_channel *channel)
+{
+ struct vmbus_channel_close_channel *msg;
+ int ret;
+
+ vmbus_reset_channel_cb(channel);
+
/*
* In case a device driver's probe() fails (e.g.,
* util_probe() -> vmbus_open() returns -ENOMEM) and the device is
@@ -585,16 +618,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
}
channel->state = CHANNEL_OPEN_STATE;
- channel->sc_creation_callback = NULL;
- /* Stop callback and cancel the timer asap */
- if (channel->target_cpu != get_cpu()) {
- put_cpu();
- smp_call_function_single(channel->target_cpu, reset_channel_cb,
- channel, true);
- } else {
- reset_channel_cb(channel);
- put_cpu();
- }
/* Send a closing message */
@@ -639,8 +662,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
- /* re-enable tasklet for use on re-open */
- tasklet_enable(&channel->callback_event);
return ret;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index ecc2bd275a73..0f0e091c117c 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -527,10 +527,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
struct hv_device *dev
= newchannel->primary_channel->device_obj;
- if (vmbus_add_channel_kobj(dev, newchannel)) {
- atomic_dec(&vmbus_connection.offer_in_progress);
+ if (vmbus_add_channel_kobj(dev, newchannel))
goto err_free_chan;
- }
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
@@ -895,6 +893,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
}
/*
+ * Before setting channel->rescind in vmbus_rescind_cleanup(), we
+ * should make sure the channel callback is not running any more.
+ */
+ vmbus_reset_channel_cb(channel);
+
+ /*
* Now wait for offer handling to complete.
*/
vmbus_rescind_cleanup(channel);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 658dc765753b..748a1c4172a6 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -64,7 +64,7 @@ int hv_init(void)
return -ENOMEM;
direct_mode_enabled = ms_hyperv.misc_features &
- HV_X64_STIMER_DIRECT_MODE_AVAILABLE;
+ HV_STIMER_DIRECT_MODE_AVAILABLE;
return 0;
}
@@ -127,14 +127,14 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hyperv_cs->read(NULL);
current_tick += delta;
- hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
+ hv_init_timer(0, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
- hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);
+ hv_init_timer(0, 0);
+ hv_init_timer_config(0, 0);
if (direct_mode_enabled)
hv_disable_stimer0_percpu_irq(stimer0_irq);
@@ -164,7 +164,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = VMBUS_MESSAGE_SINT;
}
- hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
+ hv_init_timer_config(0, timer_cfg.as_uint64);
return 0;
}
@@ -242,6 +242,10 @@ int hv_synic_alloc(void)
return 0;
err:
+ /*
+ * Any memory allocations that succeeded will be freed when
+ * the caller cleans up by calling hv_synic_free()
+ */
return -ENOMEM;
}
@@ -254,12 +258,10 @@ void hv_synic_free(void)
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
- if (hv_cpu->synic_event_page)
- free_page((unsigned long)hv_cpu->synic_event_page);
- if (hv_cpu->synic_message_page)
- free_page((unsigned long)hv_cpu->synic_message_page);
- if (hv_cpu->post_msg_page)
- free_page((unsigned long)hv_cpu->post_msg_page);
+ kfree(hv_cpu->clk_evt);
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ free_page((unsigned long)hv_cpu->post_msg_page);
}
kfree(hv_context.hv_numa_map);
@@ -298,18 +300,16 @@ int hv_synic_init(unsigned int cpu)
hv_set_siefp(siefp.as_uint64);
/* Setup the shared SINT. */
- hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
shared_sint.masked = false;
- if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
+ if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
shared_sint.auto_eoi = false;
else
shared_sint.auto_eoi = true;
- hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
hv_get_synic_state(sctrl.as_uint64);
@@ -322,7 +322,7 @@ int hv_synic_init(unsigned int cpu)
/*
* Register the per-cpu clockevent source.
*/
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE)
clockevents_config_and_register(hv_cpu->clk_evt,
HV_TIMER_FREQUENCY,
HV_MIN_DELTA_TICKS,
@@ -337,7 +337,7 @@ void hv_synic_clockevents_cleanup(void)
{
int cpu;
- if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
+ if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
return;
if (direct_mode_enabled)
@@ -396,7 +396,7 @@ int hv_synic_cleanup(unsigned int cpu)
return -EBUSY;
/* Turn off clockevent device */
- if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
+ if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
struct hv_per_cpu_context *hv_cpu
= this_cpu_ptr(hv_context.cpu_context);
@@ -405,15 +405,13 @@ int hv_synic_cleanup(unsigned int cpu)
put_cpu_ptr(hv_cpu);
}
- hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
hv_get_simp(simp.as_uint64);
simp.simp_enabled = 0;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b3e9f13f8bc3..b1b788082793 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1765,6 +1765,9 @@ static struct hv_driver balloon_drv = {
.id_table = id_table,
.probe = balloon_probe,
.remove = balloon_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init init_balloon_drv(void)
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 14dce25c104f..423205077bf6 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -487,6 +487,9 @@ static struct hv_driver util_drv = {
.id_table = id_table,
.probe = util_probe,
.remove = util_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int hv_ptp_enable(struct ptp_clock_info *info,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index be3c8b10b84a..3e90eb91db45 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -431,7 +431,24 @@ static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
}
/*
- * Update host ring buffer after iterating over packets.
+ * Update host ring buffer after iterating over packets. If the host has
+ * stopped queuing new entries because it found the ring buffer full, and
+ * sufficient space is being freed up, signal the host. But be careful to
+ * only signal the host when necessary, both for performance reasons and
+ * because Hyper-V protects itself by throttling guests that signal
+ * inappropriately.
+ *
+ * Determining when to signal is tricky. There are three key data inputs
+ * that must be handled in this order to avoid race conditions:
+ *
+ * 1. Update the read_index
+ * 2. Read the pending_send_sz
+ * 3. Read the current write_index
+ *
+ * The interrupt_mask is not used to determine when to signal. The
+ * interrupt_mask is used only on the guest->host ring buffer when
+ * sending requests to the host. The host does not use it on the host->
+ * guest ring buffer to indicate whether it should be signaled.
*/
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
@@ -447,22 +464,30 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
start_read_index = rbi->ring_buffer->read_index;
rbi->ring_buffer->read_index = rbi->priv_read_index;
+ /*
+ * Older versions of Hyper-V (before WS2012 and Win8) do not
+ * implement pending_send_sz and simply poll if the host->guest
+ * ring buffer is full. No signaling is needed or expected.
+ */
if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
return;
/*
* Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
+ * If reading pending_send_sz were to be reordered and happen
+ * before we commit the new read_index, a race could occur. If the
+ * host were to set the pending_send_sz after we have sampled
+ * pending_send_sz, and the ring buffer blocks before we commit the
* read index, we could miss sending the interrupt. Issue a full
* memory barrier to address this.
*/
virt_mb();
+ /*
+ * If the pending_send_sz is zero, then the ring buffer is not
+ * blocked and there is no need to signal. This is by far the
+ * most common case, so exit quickly for best performance.
+ */
pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
if (!pending_sz)
return;
@@ -476,14 +501,32 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
/*
- * If there was space before we began iteration,
- * then host was not blocked.
+ * We want to signal the host only if we're transitioning
+ * from a "not enough free space" state to a "enough free
+ * space" state. For example, it's possible that this function
+ * could run and free up enough space to signal the host, and then
+ * run again and free up additional space before the host has a
+ * chance to clear the pending_send_sz. The 2nd invocation would
+ * be a null transition from "enough free space" to "enough free
+ * space", which doesn't warrant a signal.
+ *
+ * Exactly filling the ring buffer is treated as "not enough
+ * space". The ring buffer always must have at least one byte
+ * empty so the empty and full conditions are distinguishable.
+ * hv_get_bytes_to_write() doesn't fully tell the truth in
+ * this regard.
+ *
+ * So first check if we were in the "enough free space" state
+ * before we began the iteration. If so, the host was not
+ * blocked, and there's no need to signal.
*/
-
if (curr_write_sz - bytes_read > pending_sz)
return;
- /* If pending write will not fit, don't give false hope. */
+ /*
+ * Similarly, if the new state is "not enough space", then
+ * there's no need to signal.
+ */
if (curr_write_sz <= pending_sz)
return;
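
For reference, the decision above reduces to two early-exit checks. A
minimal kernel-style sketch, with an illustrative name that is not part
of the driver's API:

	/* Signal only on a "not enough" -> "enough free space" transition. */
	static bool host_needs_signal(u32 curr_write_sz, u32 bytes_read,
				      u32 pending_sz)
	{
		/* Enough space even before this read: host was never blocked. */
		if (curr_write_sz - bytes_read > pending_sz)
			return false;
		/* Still not enough space: don't give false hope. */
		if (curr_write_sz <= pending_sz)
			return false;
		return true;
	}
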
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b10fe26c4891..b1b548a21f91 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -56,6 +56,8 @@ static struct completion probe_event;
static int hyperv_cpuhp_online;
+static void *hv_panic_page;
+
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
void *args)
{
@@ -208,6 +210,20 @@ static ssize_t modalias_show(struct device *dev,
}
static DEVICE_ATTR_RO(modalias);
+#ifdef CONFIG_NUMA
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ if (!hv_dev->channel)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
+}
+static DEVICE_ATTR_RO(numa_node);
+#endif
+
static ssize_t server_monitor_pending_show(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
@@ -490,6 +506,9 @@ static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_class_id.attr,
&dev_attr_device_id.attr,
&dev_attr_modalias.attr,
+#ifdef CONFIG_NUMA
+ &dev_attr_numa_node.attr,
+#endif
&dev_attr_server_monitor_pending.attr,
&dev_attr_client_monitor_pending.attr,
&dev_attr_server_monitor_latency.attr,
@@ -1018,6 +1037,72 @@ static void vmbus_isr(void)
add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
+/*
+ * Boolean to control whether to report panic messages over Hyper-V.
+ *
+ * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
+ */
+static int sysctl_record_panic_msg = 1;
+
+/*
+ * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
+ * buffer and call into Hyper-V to transfer the data.
+ */
+static void hv_kmsg_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ size_t bytes_written;
+ phys_addr_t panic_pa;
+
+ /* We are only interested in panics. */
+ if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
+ return;
+
+ panic_pa = virt_to_phys(hv_panic_page);
+
+ /*
+ * Write dump contents to the page. No need to synchronize; panic should
+ * be single-threaded.
+ */
+ kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
+ &bytes_written);
+ if (bytes_written)
+ hyperv_report_panic_msg(panic_pa, bytes_written);
+}
+
+static struct kmsg_dumper hv_kmsg_dumper = {
+ .dump = hv_kmsg_dump,
+};
+
+static struct ctl_table_header *hv_ctl_table_hdr;
+static int zero;
+static int one = 1;
+
+/*
+ * sysctl option to allow the user to control whether kmsg data should be
+ * reported to Hyper-V on panic.
+ */
+static struct ctl_table hv_ctl_table[] = {
+ {
+ .procname = "hyperv_record_panic_msg",
+ .data = &sysctl_record_panic_msg,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one
+ },
+ {}
+};
+
+static struct ctl_table hv_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = hv_ctl_table
+ },
+ {}
+};
/*
* vmbus_bus_init - Main vmbus driver initialization routine.
@@ -1065,6 +1150,32 @@ static int vmbus_bus_init(void)
* Only register if the crash MSRs are available
*/
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ u64 hyperv_crash_ctl;
+ /*
+ * Failure to register the sysctl is not fatal, since
+ * reporting is enabled by default.
+ */
+ hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
+ if (!hv_ctl_table_hdr)
+ pr_err("Hyper-V: sysctl table register error");
+
+ /*
+ * Register for panic kmsg callback only if the right
+ * capability is supported by the hypervisor.
+ */
+ hv_get_crash_ctl(hyperv_crash_ctl);
+ if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
+ hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (hv_panic_page) {
+ ret = kmsg_dump_register(&hv_kmsg_dumper);
+ if (ret)
+ pr_err("Hyper-V: kmsg dump register "
+ "error 0x%x\n", ret);
+ } else
+ pr_err("Hyper-V: panic message page memory "
+ "allocation failed");
+ }
+
register_die_notifier(&hyperv_die_block);
atomic_notifier_chain_register(&panic_notifier_list,
&hyperv_panic_block);
@@ -1081,7 +1192,9 @@ err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
-
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
return ret;
}
@@ -1785,10 +1898,15 @@ static void __exit vmbus_exit(void)
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+ kmsg_dump_unregister(&hv_kmsg_dumper);
unregister_die_notifier(&hyperv_die_block);
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_block);
}
+
+ free_page((unsigned long)hv_panic_page);
+ unregister_sysctl_table(hv_ctl_table_hdr);
+ hv_ctl_table_hdr = NULL;
bus_unregister(&hv_bus);
cpuhp_remove_state(hyperv_cpuhp_online);
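
The sysctl path follows from the nested tables above: hv_root_table
registers "kernel" with hv_ctl_table as its child, giving
/proc/sys/kernel/hyperv_record_panic_msg. A minimal userspace sketch
(illustrative, not part of this patch) that disables reporting:

	#include <stdio.h>

	int main(void)
	{
		/* Path derived from hv_root_table/hv_ctl_table above */
		FILE *f = fopen("/proc/sys/kernel/hyperv_record_panic_msg", "w");

		if (!f)
			return 1;
		fputs("0\n", f);	/* 0 disables, 1 (the default) enables */
		return fclose(f) ? 1 : 0;
	}
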
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 638567fb7cd8..3d9e210beedf 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 67860ad2e3d9..78fe8759d2a9 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -23,6 +23,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/hwmon.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ef9cb3c164e1..ad34380cac49 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -31,6 +31,17 @@ config CORESIGHT_LINK_AND_SINK_TMC
complies with the generic implementation of the component without
special enhancement or added features.
+config CORESIGHT_CATU
+ bool "Coresight Address Translation Unit (CATU) driver"
+ depends on CORESIGHT_LINK_AND_SINK_TMC
+ help
+ Enable support for the Coresight Address Translation Unit (CATU).
+ CATU supports a scatter-gather table of 4K pages, with
+ forward/backward lookup. CATU helps the TMC ETR use a large,
+ physically non-contiguous trace buffer by translating the addresses
+ used by the ETR into physical addresses via a lookup of the provided
+ table. CATU can also be used in pass-through mode, where addresses
+ are not translated.
+
config CORESIGHT_SINK_TPIU
bool "Coresight generic TPIU driver"
depends on CORESIGHT_LINKS_AND_SINKS
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 61db9dd0d571..41870ded51a3 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
+obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
new file mode 100644
index 000000000000..ff94e58845b7
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Coresight Address Translation Unit support
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "coresight-catu.h"
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+#define csdev_to_catu_drvdata(csdev) \
+ dev_get_drvdata(csdev->dev.parent)
+
+/* Verbose output for CATU table contents */
+#ifdef CATU_DEBUG
+#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
+#else
+#define catu_dbg(x, ...) do {} while (0)
+#endif
+
+struct catu_etr_buf {
+ struct tmc_sg_table *catu_table;
+ dma_addr_t sladdr;
+};
+
+/*
+ * CATU uses a page size of 4KB for page tables as well as data pages.
+ * Each 64bit entry in the table has the following format.
+ *
+ * 63 12 1 0
+ * ------------------------------------
+ * | Address [63-12] | SBZ | V|
+ * ------------------------------------
+ *
+ * where bit[0] V indicates whether the address is valid.
+ * Each 4K table page holds up to 256 data page pointers, occupying up
+ * to 2K. At the end of the 4K page there are two link pointers
+ * (i.e., entries 510 and 511), pointing to the previous and next
+ * table pages respectively.
+ * E.g., a table of two pages could look like:
+ *
+ * Table Page 0 Table Page 1
+ * SLADDR ===> x------------------x x--> x-----------------x
+ * INADDR ->| Page 0 | V | | | Page 256 | V | <- INADDR+1M
+ * |------------------| | |-----------------|
+ * INADDR+4K ->| Page 1 | V | | | |
+ * |------------------| | |-----------------|
+ * | Page 2 | V | | | |
+ * |------------------| | |-----------------|
+ * | ... | V | | | ... |
+ * |------------------| | |-----------------|
+ * INADDR+1020K| Page 255 | V | | | Page 511 | V |
+ * SLADDR+2K==>|------------------| | |-----------------|
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | UNUSED | | | | |
+ * |------------------| | | |
+ * | ... | | | | |
+ * |------------------| | |-----------------|
+ * | IGNORED | 0 | | | Table Page 0| 1 |
+ * |------------------| | |-----------------|
+ * | Table Page 1| 1 |--x | IGNORED | 0 |
+ * x------------------x x-----------------x
+ * SLADDR+4K==>
+ *
+ * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
+ * must be aligned to 1MB (the size addressable by a single page table).
+ * The CATU maps INADDR{LO:HI} to the first page in the table pointed
+ * to by SLADDR{LO:HI} and so on.
+ *
+ */
+typedef u64 cate_t;
+
+#define CATU_PAGE_SHIFT 12
+#define CATU_PAGE_SIZE (1UL << CATU_PAGE_SHIFT)
+#define CATU_PAGES_PER_SYSPAGE (PAGE_SIZE / CATU_PAGE_SIZE)
+
+/* Page pointers are only allocated in the first 2K half */
+#define CATU_PTRS_PER_PAGE ((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
+#define CATU_PTRS_PER_SYSPAGE (CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
+#define CATU_LINK_PREV ((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
+#define CATU_LINK_NEXT ((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)
+
+#define CATU_ADDR_SHIFT 12
+#define CATU_ADDR_MASK ~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
+#define CATU_ENTRY_VALID ((cate_t)0x1)
+#define CATU_VALID_ENTRY(addr) \
+ (((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
+#define CATU_ENTRY_ADDR(entry) ((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))
+
+/* CATU expects the INADDR to be aligned to 1M. */
+#define CATU_DEFAULT_INADDR (1ULL << 20)
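+
+/*
+ * Illustrative example (not used by the driver): encoding and decoding
+ * an entry with the helpers above, for a 4K-aligned data page at the
+ * made-up address 0x80001000:
+ *
+ *	CATU_VALID_ENTRY(0x80001000ULL) == 0x80001001	(bit[0] V set)
+ *	CATU_ENTRY_ADDR(0x80001001)	== 0x80001000
+ */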
+
+/*
+ * catu_get_table : Retrieve the table pointers for the given @offset
+ * within the buffer. The buffer is wrapped around to a valid offset.
+ *
+ * Returns : The CPU virtual address for the beginning of the table
+ * containing the data page pointer for @offset. If @daddrp is not NULL,
+ * @daddrp is set to the DMA address of the beginning of the table.
+ */
+static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
+ unsigned long offset,
+ dma_addr_t *daddrp)
+{
+ unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
+ unsigned int table_nr, pg_idx, pg_offset;
+ struct tmc_pages *table_pages = &catu_table->table_pages;
+ void *ptr;
+
+ /* Make sure offset is within the range */
+ offset %= buf_size;
+
+ /*
+ * Each table can address 1MB and a single kernel page can
+ * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
+ */
+ table_nr = offset >> 20;
+ /* Find the table page in which table_nr lies */
+ pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
+ pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
+ if (daddrp)
+ *daddrp = table_pages->daddrs[pg_idx] + pg_offset;
+ ptr = page_address(table_pages->pages[pg_idx]);
+ return (cate_t *)((unsigned long)ptr + pg_offset);
+}
+
+#ifdef CATU_DEBUG
+static void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ cate_t *table;
+ unsigned long table_end, buf_size, offset = 0;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ dev_dbg(catu_table->dev,
+ "Dump table %p, tdaddr: %llx\n",
+ catu_table, catu_table->table_daddr);
+
+ while (offset < buf_size) {
+ table_end = offset + SZ_1M < buf_size ?
+ offset + SZ_1M : buf_size;
+ table = catu_get_table(catu_table, offset, NULL);
+ for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
+ dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
+ dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
+ table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
+ dev_dbg(catu_table->dev, "== End of sub-table ===");
+ }
+ dev_dbg(catu_table->dev, "== End of Table ===");
+}
+
+#else
+static inline void catu_dump_table(struct tmc_sg_table *catu_table)
+{
+}
+#endif
+
+static inline cate_t catu_make_entry(dma_addr_t addr)
+{
+ return addr ? CATU_VALID_ENTRY(addr) : 0;
+}
+
+/*
+ * catu_populate_table : Populate the given CATU table.
+ * The table is always populated as a circular table.
+ * i.e., the "prev" link of the "first" table points to the "last"
+ * table and the "next" link of the "last" table points to the
+ * "first" table. The buffer should be made linear by calling
+ * catu_set_table().
+ */
+static void
+catu_populate_table(struct tmc_sg_table *catu_table)
+{
+ int i;
+ int sys_pidx; /* Index to current system data page */
+ int catu_pidx; /* Index of CATU page within the system data page */
+ unsigned long offset, buf_size, table_end;
+ dma_addr_t data_daddr;
+ dma_addr_t prev_taddr, next_taddr, cur_taddr;
+ cate_t *table_ptr, *next_table;
+
+ buf_size = tmc_sg_table_buf_size(catu_table);
+ sys_pidx = catu_pidx = 0;
+ offset = 0;
+
+ table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
+ prev_taddr = 0; /* Prev link for the first table */
+
+ while (offset < buf_size) {
+ /*
+ * The @offset is always 1M aligned here and we have an
+ * empty table @table_ptr to fill. Each table can address
+ * up to 1MB of data buffer. The last table may have fewer
+ * entries if the buffer size is not aligned.
+ */
+ table_end = (offset + SZ_1M) < buf_size ?
+ (offset + SZ_1M) : buf_size;
+ for (i = 0; offset < table_end;
+ i++, offset += CATU_PAGE_SIZE) {
+
+ data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
+ catu_pidx * CATU_PAGE_SIZE;
+ catu_dbg(catu_table->dev,
+ "[table %5ld:%03d] 0x%llx\n",
+ (offset >> 20), i, data_daddr);
+ table_ptr[i] = catu_make_entry(data_daddr);
+ /* Move the pointers for data pages */
+ catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
+ if (catu_pidx == 0)
+ sys_pidx++;
+ }
+
+ /*
+ * If we have finished all the valid entries, fill the rest of
+ * the table (i.e., the last table page) with invalid entries,
+ * so that lookups fail.
+ */
+ if (offset == buf_size) {
+ memset(&table_ptr[i], 0,
+ sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
+ next_taddr = 0;
+ } else {
+ next_table = catu_get_table(catu_table,
+ offset, &next_taddr);
+ }
+
+ table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
+ table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);
+
+ catu_dbg(catu_table->dev,
+ "[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
+ (offset >> 20) - 1, cur_taddr, prev_taddr, next_taddr);
+
+ /* Update the prev/next addresses */
+ if (next_taddr) {
+ prev_taddr = cur_taddr;
+ cur_taddr = next_taddr;
+ table_ptr = next_table;
+ }
+ }
+
+ /* Sync the table for device */
+ tmc_sg_table_sync_table(catu_table);
+}
+
+static struct tmc_sg_table *
+catu_init_sg_table(struct device *catu_dev, int node,
+ ssize_t size, void **pages)
+{
+ int nr_tpages;
+ struct tmc_sg_table *catu_table;
+
+ /*
+ * Each table can address up to 1MB and we can have
+ * CATU_PAGES_PER_SYSPAGE tables in a system page.
+ */
+ nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
+ catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
+ size >> PAGE_SHIFT, pages);
+ if (IS_ERR(catu_table))
+ return catu_table;
+
+ catu_populate_table(catu_table);
+ dev_dbg(catu_dev,
+ "Setup table %p, size %ldKB, %d table pages\n",
+ catu_table, (unsigned long)size >> 10, nr_tpages);
+ catu_dump_table(catu_table);
+ return catu_table;
+}
+
+static void catu_free_etr_buf(struct etr_buf *etr_buf)
+{
+ struct catu_etr_buf *catu_buf;
+
+ if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
+ return;
+
+ catu_buf = etr_buf->private;
+ tmc_free_sg_table(catu_buf->catu_table);
+ kfree(catu_buf);
+}
+
+static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
+}
+
+static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+ struct tmc_sg_table *catu_table = catu_buf->catu_table;
+ u64 r_offset, w_offset;
+
+ /*
+ * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
+ * offsets within the trace buffer.
+ */
+ r_offset = rrp - etr_buf->hwaddr;
+ w_offset = rwp - etr_buf->hwaddr;
+
+ if (!etr_buf->full) {
+ etr_buf->len = w_offset - r_offset;
+ if (w_offset < r_offset)
+ etr_buf->len += etr_buf->size;
+ } else {
+ etr_buf->len = etr_buf->size;
+ }
+
+ etr_buf->offset = r_offset;
+ tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
+}
+
+static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
+ struct etr_buf *etr_buf, int node, void **pages)
+{
+ struct coresight_device *csdev;
+ struct device *catu_dev;
+ struct tmc_sg_table *catu_table;
+ struct catu_etr_buf *catu_buf;
+
+ csdev = tmc_etr_get_catu_device(tmc_drvdata);
+ if (!csdev)
+ return -ENODEV;
+ catu_dev = csdev->dev.parent;
+ catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
+ if (!catu_buf)
+ return -ENOMEM;
+
+ catu_table = catu_init_sg_table(catu_dev, node, etr_buf->size, pages);
+ if (IS_ERR(catu_table)) {
+ kfree(catu_buf);
+ return PTR_ERR(catu_table);
+ }
+
+ etr_buf->mode = ETR_MODE_CATU;
+ etr_buf->private = catu_buf;
+ etr_buf->hwaddr = CATU_DEFAULT_INADDR;
+
+ catu_buf->catu_table = catu_table;
+ /* Get the table base address */
+ catu_buf->sladdr = catu_table->table_daddr;
+
+ return 0;
+}
+
+const struct etr_buf_operations etr_catu_buf_ops = {
+ .alloc = catu_alloc_etr_buf,
+ .free = catu_free_etr_buf,
+ .sync = catu_sync_etr_buf,
+ .get_data = catu_get_data_etr_buf,
+};
+
+coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
+coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
+coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
+coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
+coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
+coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
+coresight_simple_reg64(struct catu_drvdata, sladdr,
+ CATU_SLADDRLO, CATU_SLADDRHI);
+coresight_simple_reg64(struct catu_drvdata, inaddr,
+ CATU_INADDRLO, CATU_INADDRHI);
+
+static struct attribute *catu_mgmt_attrs[] = {
+ &dev_attr_devid.attr,
+ &dev_attr_control.attr,
+ &dev_attr_status.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_axictrl.attr,
+ &dev_attr_irqen.attr,
+ &dev_attr_sladdr.attr,
+ &dev_attr_inaddr.attr,
+ NULL,
+};
+
+static const struct attribute_group catu_mgmt_group = {
+ .attrs = catu_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *catu_groups[] = {
+ &catu_mgmt_group,
+ NULL,
+};
+
+
+static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
+{
+ return coresight_timeout(drvdata->base,
+ CATU_STATUS, CATU_STATUS_READY, 1);
+}
+
+static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
+{
+ u32 control, mode;
+ struct etr_buf *etr_buf = data;
+
+ if (catu_wait_for_ready(drvdata))
+ dev_warn(drvdata->dev, "Timeout while waiting for READY\n");
+
+ control = catu_read_control(drvdata);
+ if (control & BIT(CATU_CONTROL_ENABLE)) {
+ dev_warn(drvdata->dev, "CATU is already enabled\n");
+ return -EBUSY;
+ }
+
+ control |= BIT(CATU_CONTROL_ENABLE);
+
+ if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
+ struct catu_etr_buf *catu_buf = etr_buf->private;
+
+ mode = CATU_MODE_TRANSLATE;
+ catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
+ catu_write_sladdr(drvdata, catu_buf->sladdr);
+ catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
+ } else {
+ mode = CATU_MODE_PASS_THROUGH;
+ catu_write_sladdr(drvdata, 0);
+ catu_write_inaddr(drvdata, 0);
+ }
+
+ catu_write_irqen(drvdata, 0);
+ catu_write_mode(drvdata, mode);
+ catu_write_control(drvdata, control);
+ dev_dbg(drvdata->dev, "Enabled in %s mode\n",
+ (mode == CATU_MODE_PASS_THROUGH) ?
+ "Pass through" :
+ "Translate");
+ return 0;
+}
+
+static int catu_enable(struct coresight_device *csdev, void *data)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_enable_hw(catu_drvdata, data);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+static int catu_disable_hw(struct catu_drvdata *drvdata)
+{
+ int rc = 0;
+
+ catu_write_control(drvdata, 0);
+ if (catu_wait_for_ready(drvdata)) {
+ dev_info(drvdata->dev, "Timeout while waiting for READY\n");
+ rc = -EAGAIN;
+ }
+
+ dev_dbg(drvdata->dev, "Disabled\n");
+ return rc;
+}
+
+static int catu_disable(struct coresight_device *csdev, void *__unused)
+{
+ int rc;
+ struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
+
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_disable_hw(catu_drvdata);
+ CS_LOCK(catu_drvdata->base);
+ return rc;
+}
+
+const struct coresight_ops_helper catu_helper_ops = {
+ .enable = catu_enable,
+ .disable = catu_disable,
+};
+
+const struct coresight_ops catu_ops = {
+ .helper_ops = &catu_helper_ops,
+};
+
+static int catu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret = 0;
+ u32 dma_mask;
+ struct catu_drvdata *drvdata;
+ struct coresight_desc catu_desc;
+ struct coresight_platform_data *pdata = NULL;
+ struct device *dev = &adev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
+ }
+ dev->platform_data = pdata;
+ }
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ drvdata->dev = dev;
+ dev_set_drvdata(dev, drvdata);
+ base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out;
+ }
+
+ /* Setup dma mask for the device */
+ dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
+ switch (dma_mask) {
+ case 32:
+ case 40:
+ case 44:
+ case 48:
+ case 52:
+ case 56:
+ case 64:
+ break;
+ default:
+ /* Default to 40 bits, as supported by TMC-ETR */
+ dma_mask = 40;
+ }
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
+ if (ret)
+ goto out;
+
+ drvdata->base = base;
+ catu_desc.pdata = pdata;
+ catu_desc.dev = dev;
+ catu_desc.groups = catu_groups;
+ catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
+ catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
+ catu_desc.ops = &catu_ops;
+ drvdata->csdev = coresight_register(&catu_desc);
+ if (IS_ERR(drvdata->csdev))
+ ret = PTR_ERR(drvdata->csdev);
+out:
+ pm_runtime_put(&adev->dev);
+ return ret;
+}
+
+static struct amba_id catu_ids[] = {
+ {
+ .id = 0x000bb9ee,
+ .mask = 0x000fffff,
+ },
+ {},
+};
+
+static struct amba_driver catu_driver = {
+ .drv = {
+ .name = "coresight-catu",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = catu_probe,
+ .id_table = catu_ids,
+};
+
+builtin_amba_driver(catu_driver);
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
new file mode 100644
index 000000000000..1b281f0dcccc
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Arm Limited. All rights reserved.
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ */
+
+#ifndef _CORESIGHT_CATU_H
+#define _CORESIGHT_CATU_H
+
+#include "coresight-priv.h"
+
+/* Register offset from base */
+#define CATU_CONTROL 0x000
+#define CATU_MODE 0x004
+#define CATU_AXICTRL 0x008
+#define CATU_IRQEN 0x00c
+#define CATU_SLADDRLO 0x020
+#define CATU_SLADDRHI 0x024
+#define CATU_INADDRLO 0x028
+#define CATU_INADDRHI 0x02c
+#define CATU_STATUS 0x100
+#define CATU_DEVARCH 0xfbc
+
+#define CATU_CONTROL_ENABLE 0
+
+#define CATU_MODE_PASS_THROUGH 0U
+#define CATU_MODE_TRANSLATE 1U
+
+#define CATU_AXICTRL_ARCACHE_SHIFT 4
+#define CATU_AXICTRL_ARCACHE_MASK 0xf
+#define CATU_AXICTRL_ARPROT_MASK 0x3
+#define CATU_AXICTRL_ARCACHE(arcache) \
+ (((arcache) & CATU_AXICTRL_ARCACHE_MASK) << CATU_AXICTRL_ARCACHE_SHIFT)
+
+#define CATU_AXICTRL_VAL(arcache, arprot) \
+ (CATU_AXICTRL_ARCACHE(arcache) | ((arprot) & CATU_AXICTRL_ARPROT_MASK))
+
+#define AXI3_AxCACHE_WB_READ_ALLOC 0x7
+/*
+ * AXI - ARPROT bits:
+ * See AMBA AXI & ACE Protocol specification (ARM IHI 0022E)
+ * section A4.7, Access Permissions.
+ *
+ * Bit 0: 0 - Unprivileged access, 1 - Privileged access
+ * Bit 1: 0 - Secure access, 1 - Non-secure access.
+ * Bit 2: 0 - Data access, 1 - Instruction access.
+ *
+ * CATU AXICTRL:ARPROT[2] is res0 as we always access data.
+ */
+#define CATU_OS_ARPROT 0x2
+
+#define CATU_OS_AXICTRL \
+ CATU_AXICTRL_VAL(AXI3_AxCACHE_WB_READ_ALLOC, CATU_OS_ARPROT)
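+
+/* Worked value for reference: (0x7 << 4) | 0x2 == 0x72 */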
+
+#define CATU_STATUS_READY 8
+#define CATU_STATUS_ADRERR 0
+#define CATU_STATUS_AXIERR 4
+
+#define CATU_IRQEN_ON 0x1
+#define CATU_IRQEN_OFF 0x0
+
+struct catu_drvdata {
+ struct device *dev;
+ void __iomem *base;
+ struct coresight_device *csdev;
+ int irq;
+};
+
+#define CATU_REG32(name, offset) \
+static inline u32 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, offset, -1); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u32 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, offset, -1); \
+}
+
+#define CATU_REG_PAIR(name, lo_off, hi_off) \
+static inline u64 \
+catu_read_##name(struct catu_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+} \
+static inline void \
+catu_write_##name(struct catu_drvdata *drvdata, u64 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+}
+
+CATU_REG32(control, CATU_CONTROL);
+CATU_REG32(mode, CATU_MODE);
+CATU_REG32(irqen, CATU_IRQEN);
+CATU_REG32(axictrl, CATU_AXICTRL);
+CATU_REG_PAIR(sladdr, CATU_SLADDRLO, CATU_SLADDRHI)
+CATU_REG_PAIR(inaddr, CATU_INADDRLO, CATU_INADDRHI)
+
+static inline bool coresight_is_catu_device(struct coresight_device *csdev)
+{
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return false;
+ if (csdev->type != CORESIGHT_DEV_TYPE_HELPER)
+ return false;
+ if (csdev->subtype.helper_subtype != CORESIGHT_DEV_SUBTYPE_HELPER_CATU)
+ return false;
+ return true;
+}
+
+#ifdef CONFIG_CORESIGHT_CATU
+extern const struct etr_buf_operations etr_catu_buf_ops;
+#else
+/* Dummy declaration for the CATU ops */
+static const struct etr_buf_operations etr_catu_buf_ops;
+#endif
+
+#endif
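
For clarity, CATU_REG32(control, CATU_CONTROL) above generates exactly
this pair of accessors (macro arguments substituted by hand):

	static inline u32 catu_read_control(struct catu_drvdata *drvdata)
	{
		return coresight_read_reg_pair(drvdata->base, CATU_CONTROL, -1);
	}

	static inline void catu_write_control(struct catu_drvdata *drvdata, u32 val)
	{
		coresight_write_reg_pair(drvdata->base, val, CATU_CONTROL, -1);
	}
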
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 320d29df17e1..306119eaf16a 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -195,7 +195,6 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
bool lost = false;
int i;
u8 *buf_ptr;
- const u32 *barrier;
u32 read_data, depth;
u32 read_ptr, write_ptr;
u32 frame_off, frame_endoff;
@@ -226,19 +225,16 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
depth = drvdata->buffer_depth;
buf_ptr = drvdata->buf;
- barrier = barrier_pkt;
for (i = 0; i < depth; i++) {
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
*(u32 *)buf_ptr = read_data;
buf_ptr += 4;
}
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+
if (frame_off) {
buf_ptr -= (frame_endoff * 4);
for (i = 0; i < frame_endoff; i++) {
@@ -447,7 +443,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
buf_ptr = buf->data_pages[cur] + offset;
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- if (lost && *barrier) {
+ if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
read_data = *barrier;
barrier++;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index e8b4549e30e2..79e1ad860d8a 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -168,8 +168,6 @@
* @seq_curr_state: current value of the sequencer register.
* @ctxid_idx: index for the context ID registers.
* @ctxid_pid: value for the context ID to trigger on.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask: mask applicable to all the context IDs.
* @sync_freq: Synchronisation frequency.
* @timestamp_event: Defines an event that requests the insertion
@@ -202,7 +200,6 @@ struct etm_config {
u32 seq_curr_state;
u8 ctxid_idx;
u32 ctxid_pid[ETM_MAX_CTXID_CMP];
- u32 ctxid_vpid[ETM_MAX_CTXID_CMP];
u32 ctxid_mask;
u32 sync_freq;
u32 timestamp_event;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 9435c1481f61..75487b3fad86 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
@@ -1025,8 +1026,15 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
- val = config->ctxid_vpid[config->ctxid_idx];
+ val = config->ctxid_pid[config->ctxid_idx];
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#lx\n", val);
@@ -1037,19 +1045,28 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
int ret;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- ret = kstrtoul(buf, 16, &vpid);
+ /*
+ * When contextID tracing is enabled the tracers insert the value
+ * found in the contextID register into the trace stream. But if a
+ * process runs in a PID namespace, its PID as seen from within the
+ * namespace won't match what the kernel sees, which makes the
+ * feature confusing and can potentially leak kernel-only
+ * information. As such, refuse to use the feature if @current is
+ * not in the initial PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 16, &pid);
if (ret)
return ret;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
config->ctxid_pid[config->ctxid_idx] = pid;
- config->ctxid_vpid[config->ctxid_idx] = vpid;
spin_unlock(&drvdata->spinlock);
return size;
@@ -1063,6 +1080,13 @@ static ssize_t ctxid_mask_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
val = config->ctxid_mask;
return sprintf(buf, "%#lx\n", val);
}
@@ -1076,6 +1100,13 @@ static ssize_t ctxid_mask_store(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 15ed64d51a5b..7c74263c333d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -230,10 +230,8 @@ void etm_set_default(struct etm_config *config)
config->seq_curr_state = 0x0;
config->ctxid_idx = 0x0;
- for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+ for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask = 0x0;
/* Setting default to 1024 as per TRM recommendation */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 4eb8da785ce0..a0365e23678e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
@@ -250,10 +251,8 @@ static ssize_t reset_store(struct device *dev,
}
config->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->numcidc; i++) {
+ for (i = 0; i < drvdata->numcidc; i++)
config->ctxid_pid[i] = 0x0;
- config->ctxid_vpid[i] = 0x0;
- }
config->ctxid_mask0 = 0x0;
config->ctxid_mask1 = 0x0;
@@ -1637,9 +1636,16 @@ static ssize_t ctxid_pid_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
- val = (unsigned long)config->ctxid_vpid[idx];
+ val = (unsigned long)config->ctxid_pid[idx];
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -1649,26 +1655,35 @@ static ssize_t ctxid_pid_store(struct device *dev,
const char *buf, size_t size)
{
u8 idx;
- unsigned long vpid, pid;
+ unsigned long pid;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
/*
+ * When contextID tracing is enabled the tracers insert the value
+ * found in the contextID register into the trace stream. But if a
+ * process runs in a PID namespace, its PID as seen from within the
+ * namespace won't match what the kernel sees, which makes the
+ * feature confusing and can potentially leak kernel-only
+ * information. As such, refuse to use the feature if @current is
+ * not in the initial PID namespace.
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (kstrtoul(buf, 16, &vpid))
+ if (kstrtoul(buf, 16, &pid))
return -EINVAL;
- pid = coresight_vpid_to_pid(vpid);
-
spin_lock(&drvdata->spinlock);
idx = config->ctxid_idx;
config->ctxid_pid[idx] = (u64)pid;
- config->ctxid_vpid[idx] = (u64)vpid;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1682,6 +1697,13 @@ static ssize_t ctxid_masks_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ /*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
spin_lock(&drvdata->spinlock);
val1 = config->ctxid_mask0;
val2 = config->ctxid_mask1;
@@ -1699,6 +1721,13 @@ static ssize_t ctxid_masks_store(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/*
+ * Don't use contextID tracing if coming from a PID namespace. See
+ * comment in ctxid_pid_store().
+ */
+ if (task_active_pid_ns(current) != &init_pid_ns)
+ return -EINVAL;
+
+ /*
* only implemented when ctxid tracing is enabled, i.e. at least one
* ctxid comparator is implemented and ctxid is greater than 0 bits
* in length
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 9bc04c50d45b..1d94ebec027b 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1027,7 +1027,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
}
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
+ dev_info(dev, "CPU%d: ETM v%d.%d initialized\n",
+ drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
if (boot_enable) {
coresight_enable(drvdata->csdev);
@@ -1045,23 +1046,19 @@ err_arch_supported:
return ret;
}
+#define ETM4x_AMBA_ID(pid) \
+ { \
+ .id = pid, \
+ .mask = 0x000fffff, \
+ }
+
static const struct amba_id etm4_ids[] = {
- { /* ETM 4.0 - Cortex-A53 */
- .id = 0x000bb95d,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - Cortex-A57 */
- .id = 0x000bb95e,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { /* ETM 4.0 - A72, Maia, HiSilicon */
- .id = 0x000bb95a,
- .mask = 0x000fffff,
- .data = "ETM 4.0",
- },
- { 0, 0},
+ ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */
+ ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */
+ ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */
+ ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */
+ ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */
+ {},
};
static struct amba_driver etm4x_driver = {
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index b7c4a6f6c6b9..52786e9d8926 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -230,8 +230,6 @@
* @addr_type: Current status of the comparator register.
* @ctxid_idx: Context ID index selector.
* @ctxid_pid: Value of the context ID comparator.
- * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
- * the same value of ctxid_pid.
* @ctxid_mask0:Context ID comparator mask for comparator 0-3.
* @ctxid_mask1:Context ID comparator mask for comparator 4-7.
* @vmid_idx: VM ID index selector.
@@ -274,7 +272,6 @@ struct etmv4_config {
u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
u8 ctxid_idx;
u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
- u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
u32 ctxid_mask0;
u32 ctxid_mask1;
u8 vmid_idx;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 0e5a74dae6a6..1a6cf3589866 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -57,7 +57,8 @@ static DEVICE_ATTR_RO(name)
#define coresight_simple_reg64(type, name, lo_off, hi_off) \
__coresight_simple_func(type, NULL, name, lo_off, hi_off)
-extern const u32 barrier_pkt[5];
+extern const u32 barrier_pkt[4];
+#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(barrier_pkt))
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
@@ -91,6 +92,13 @@ struct cs_buffers {
void **data_pages;
};
+static inline void coresight_insert_barrier_packet(void *buf)
+{
+ if (buf)
+ memcpy(buf, barrier_pkt, CORESIGHT_BARRIER_PKT_SIZE);
+}
+
static inline void CS_LOCK(void __iomem *addr)
{
do {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 61d849b11c26..0549249f4b39 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -32,39 +32,28 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
- bool lost = false;
char *bufp;
- const u32 *barrier;
- u32 read_data, status;
+ u32 read_data, lost;
int i;
- /*
- * Get a hold of the status register and see if a wrap around
- * has occurred.
- */
- status = readl_relaxed(drvdata->base + TMC_STS);
- if (status & TMC_STS_FULL)
- lost = true;
-
+ /* Check if the buffer wrapped around. */
+ lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
bufp = drvdata->buf;
drvdata->len = 0;
- barrier = barrier_pkt;
while (1) {
for (i = 0; i < drvdata->memwidth; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
- return;
-
- if (lost && *barrier) {
- read_data = *barrier;
- barrier++;
- }
-
+ goto done;
memcpy(bufp, &read_data, 4);
bufp += 4;
drvdata->len += 4;
}
}
+done:
+ if (lost)
+ coresight_insert_barrier_packet(drvdata->buf);
+ return;
}
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
@@ -109,6 +98,24 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+/*
+ * Return the available trace data in the buffer from @pos, with
+ * a maximum limit of @len, updating @bufpp with where to
+ * find it.
+ */
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ ssize_t actual = len;
+
+ /* Adjust len to the size available at @pos */
+ if (pos + actual > drvdata->len)
+ actual = drvdata->len - pos;
+ if (actual > 0)
+ *bufpp = drvdata->buf + pos;
+ return actual;
+}
+
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 02f747afa2ba..2eda5de304c2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -6,22 +6,912 @@
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
+struct etr_flat_buf {
+ struct device *dev;
+ dma_addr_t daddr;
+ void *vaddr;
+ size_t size;
+};
+
+/*
+ * The TMC ETR SG has a page size of 4K. The SG table contains pointers
+ * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
+ * 4K (e.g., 16KB or 64KB). This implies that a single OS page could
+ * contain more than one SG buffer and more than one table.
+ *
+ * A table entry has the following format:
+ *
+ * ---Bit31------------Bit4-------Bit1-----Bit0--
+ * | Address[39:12] | SBZ | Entry Type |
+ * ----------------------------------------------
+ *
+ * Address: Bits [39:12] of a physical page address. Bits [11:0] are
+ * always zero.
+ *
+ * Entry type:
+ * b00 - Reserved.
+ * b01 - Last entry in the tables, points to 4K page buffer.
+ * b10 - Normal entry, points to 4K page buffer.
+ * b11 - Link. The address points to the base of next table.
+ */
+
+typedef u32 sgte_t;
+
+#define ETR_SG_PAGE_SHIFT 12
+#define ETR_SG_PAGE_SIZE (1UL << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_PAGES_PER_SYSPAGE (PAGE_SIZE / ETR_SG_PAGE_SIZE)
+#define ETR_SG_PTRS_PER_PAGE (ETR_SG_PAGE_SIZE / sizeof(sgte_t))
+#define ETR_SG_PTRS_PER_SYSPAGE (PAGE_SIZE / sizeof(sgte_t))
+
+#define ETR_SG_ET_MASK 0x3
+#define ETR_SG_ET_LAST 0x1
+#define ETR_SG_ET_NORMAL 0x2
+#define ETR_SG_ET_LINK 0x3
+
+#define ETR_SG_ADDR_SHIFT 4
+
+#define ETR_SG_ENTRY(addr, type) \
+ (sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
+ (type & ETR_SG_ET_MASK))
+
+#define ETR_SG_ADDR(entry) \
+ (((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
+#define ETR_SG_ET(entry) ((entry) & ETR_SG_ET_MASK)
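+
+/*
+ * Worked example (illustrative): a normal entry for the 4K page at
+ * physical address 0x80001000 encodes as
+ *	ETR_SG_ENTRY(0x80001000, ETR_SG_ET_NORMAL)
+ *		== ((0x80001000 >> 12) << 4) | 0x2 == 0x800012,
+ * and ETR_SG_ADDR(0x800012) recovers 0x80001000.
+ */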
+
+/*
+ * struct etr_sg_table : ETR SG Table
+ * @sg_table: Generic SG Table holding the data/table pages.
+ * @hwaddr: hardware address used by the TMC, which is the base
+ * address of the table.
+ */
+struct etr_sg_table {
+ struct tmc_sg_table *sg_table;
+ dma_addr_t hwaddr;
+};
+
+/*
+ * tmc_etr_sg_table_entries: Total number of table entries required to map
+ * @nr_pages system pages.
+ *
+ * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
+ * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
+ * with the last entry pointing to another page of table entries.
+ * If we spill over to a new page for mapping 1 entry, we could as
+ * well replace the link entry of the previous page with the last entry.
+ */
+static inline unsigned long __attribute_const__
+tmc_etr_sg_table_entries(int nr_pages)
+{
+ unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
+ unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
+ /*
+ * If we spill over to a new page for 1 entry, we could as well
+ * make it the LAST entry in the previous page, skipping the Link
+ * address.
+ */
+ if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
+ nr_sglinks--;
+ return nr_sgpages + nr_sglinks;
+}
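+
+/*
+ * Worked example, assuming a 4K PAGE_SIZE so that
+ * ETR_SG_PAGES_PER_SYSPAGE == 1 and ETR_SG_PTRS_PER_PAGE == 1024:
+ * for nr_pages == 2048, nr_sgpages == 2048 and nr_sglinks == 2.
+ * Since 2048 % 1023 == 2, no link entry is folded away, giving
+ * 2048 + 2 == 2050 entries, i.e., three 4K table pages.
+ */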
+
+/*
+ * tmc_pages_get_offset: Go through all the pages in the tmc_pages
+ * and map the device address @addr to an offset within the virtual
+ * contiguous buffer.
+ */
+static long
+tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
+{
+ int i;
+ dma_addr_t page_start;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ page_start = tmc_pages->daddrs[i];
+ if (addr >= page_start && addr < (page_start + PAGE_SIZE))
+ return i * PAGE_SIZE + (addr - page_start);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * tmc_pages_free : Unmap and free the pages used by tmc_pages.
+ * If the pages were not allocated in tmc_pages_alloc(), we simply
+ * drop the refcount.
+ */
+static void tmc_pages_free(struct tmc_pages *tmc_pages,
+ struct device *dev, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < tmc_pages->nr_pages; i++) {
+ if (tmc_pages->daddrs && tmc_pages->daddrs[i])
+ dma_unmap_page(dev, tmc_pages->daddrs[i],
+ PAGE_SIZE, dir);
+ if (tmc_pages->pages && tmc_pages->pages[i])
+ __free_page(tmc_pages->pages[i]);
+ }
+
+ kfree(tmc_pages->pages);
+ kfree(tmc_pages->daddrs);
+ tmc_pages->pages = NULL;
+ tmc_pages->daddrs = NULL;
+ tmc_pages->nr_pages = 0;
+}
+
+/*
+ * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
+ * If @pages is not NULL, the list of page virtual addresses is
+ * used as the data pages. The pages are then dma_map'ed for @dev
+ * with dma_direction @dir.
+ *
+ * Returns 0 upon success, else the error number.
+ */
+static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
+ struct device *dev, int node,
+ enum dma_data_direction dir, void **pages)
+{
+ int i, nr_pages;
+ dma_addr_t paddr;
+ struct page *page;
+
+ nr_pages = tmc_pages->nr_pages;
+ tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
+ GFP_KERNEL);
+ if (!tmc_pages->daddrs)
+ return -ENOMEM;
+ tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
+ GFP_KERNEL);
+ if (!tmc_pages->pages) {
+ kfree(tmc_pages->daddrs);
+ tmc_pages->daddrs = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pages && pages[i]) {
+ page = virt_to_page(pages[i]);
+ /* Hold a refcount on the page */
+ get_page(page);
+ } else {
+ page = alloc_pages_node(node,
+ GFP_KERNEL | __GFP_ZERO, 0);
+ /* Unwind everything mapped so far if the allocation fails */
+ if (!page)
+ goto err;
+ }
+ paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+ if (dma_mapping_error(dev, paddr))
+ goto err;
+ tmc_pages->daddrs[i] = paddr;
+ tmc_pages->pages[i] = page;
+ }
+ return 0;
+err:
+ tmc_pages_free(tmc_pages, dev, dir);
+ return -ENOMEM;
+}
+
+static inline long
+tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
+{
+ return tmc_pages_get_offset(&sg_table->data_pages, addr);
+}
+
+static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->table_vaddr)
+ vunmap(sg_table->table_vaddr);
+ tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
+}
+
+static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
+{
+ if (sg_table->data_vaddr)
+ vunmap(sg_table->data_vaddr);
+ tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
+}
+
+void tmc_free_sg_table(struct tmc_sg_table *sg_table)
+{
+ tmc_free_table_pages(sg_table);
+ tmc_free_data_pages(sg_table);
+}
+
+/*
+ * Alloc pages for the table. Since this will be used by the device,
+ * allocate the pages closer to the device (i.e., dev_to_node(dev)
+ * rather than the CPU node).
+ */
+static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
+{
+ int rc;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ rc = tmc_pages_alloc(table_pages, sg_table->dev,
+ dev_to_node(sg_table->dev),
+ DMA_TO_DEVICE, NULL);
+ if (rc)
+ return rc;
+ sg_table->table_vaddr = vmap(table_pages->pages,
+ table_pages->nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->table_vaddr)
+ rc = -ENOMEM;
+ else
+ sg_table->table_daddr = table_pages->daddrs[0];
+ return rc;
+}
+
+static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
+{
+ int rc;
+
+ /* Allocate data pages on the node requested by the caller */
+ rc = tmc_pages_alloc(&sg_table->data_pages,
+ sg_table->dev, sg_table->node,
+ DMA_FROM_DEVICE, pages);
+ if (!rc) {
+ sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
+ sg_table->data_pages.nr_pages,
+ VM_MAP,
+ PAGE_KERNEL);
+ if (!sg_table->data_vaddr)
+ rc = -ENOMEM;
+ }
+ return rc;
+}
+
+/*
+ * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
+ * and data buffers. TMC writes to the data buffers and reads from the SG
+ * Table pages.
+ *
+ * @dev - Device to which page should be DMA mapped.
+ * @node - Numa node for mem allocations
+ * @nr_tpages - Number of pages for the table entries.
+ * @nr_dpages - Number of pages for Data buffer.
+ * @pages - Optional list of virtual address of pages.
+ */
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages)
+{
+ long rc;
+ struct tmc_sg_table *sg_table;
+
+ sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
+ if (!sg_table)
+ return ERR_PTR(-ENOMEM);
+ sg_table->data_pages.nr_pages = nr_dpages;
+ sg_table->table_pages.nr_pages = nr_tpages;
+ sg_table->node = node;
+ sg_table->dev = dev;
+
+ rc = tmc_alloc_data_pages(sg_table, pages);
+ if (!rc)
+ rc = tmc_alloc_table_pages(sg_table);
+ if (rc) {
+ tmc_free_sg_table(sg_table);
+ kfree(sg_table);
+ return ERR_PTR(rc);
+ }
+
+ return sg_table;
+}
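+
+/*
+ * Usage sketch (illustrative values): a 2MB buffer made of 4K pages
+ * needs nr_dpages == 512; the caller computes nr_tpages from its own
+ * table format, then:
+ *
+ *	sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, 512, NULL);
+ *	if (IS_ERR(sg_table))
+ *		return PTR_ERR(sg_table);
+ */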
+
+/*
+ * tmc_sg_table_sync_data_range: Sync the data buffer written
+ * by the device, from @offset up to @size bytes.
+ */
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size)
+{
+ int i, index, start;
+ int npages = DIV_ROUND_UP(size, PAGE_SIZE);
+ struct device *dev = table->dev;
+ struct tmc_pages *data = &table->data_pages;
+
+ start = offset >> PAGE_SHIFT;
+ for (i = start; i < (start + npages); i++) {
+ index = i % data->nr_pages;
+ dma_sync_single_for_cpu(dev, data->daddrs[index],
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+}
+
+/* tmc_sg_table_sync_table: Sync the page table */
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
+{
+ int i;
+ struct device *dev = sg_table->dev;
+ struct tmc_pages *table_pages = &sg_table->table_pages;
+
+ for (i = 0; i < table_pages->nr_pages; i++)
+ dma_sync_single_for_device(dev, table_pages->daddrs[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
+}
+
+/*
+ * tmc_sg_table_get_data: Get the buffer pointer for data at @offset
+ * in the SG buffer. @bufpp is updated to point to the buffer.
+ * Returns :
+ * the length of linear data available at @offset.
+ * or
+ * <= 0 if no data is available.
+ */
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp)
+{
+ size_t size;
+ int pg_idx = offset >> PAGE_SHIFT;
+ int pg_offset = offset & (PAGE_SIZE - 1);
+ struct tmc_pages *data_pages = &sg_table->data_pages;
+
+ size = tmc_sg_table_buf_size(sg_table);
+ if (offset >= size)
+ return -EINVAL;
+
+ /* Make sure we don't go beyond the end */
+ len = (len < (size - offset)) ? len : size - offset;
+ /* Respect the page boundaries */
+ len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
+ if (len > 0)
+ *bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
+ return len;
+}
+
+#ifdef ETR_SG_DEBUG
+/* Map a dma address to virtual address */
+static unsigned long
+tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
+ dma_addr_t addr, bool table)
+{
+ long offset;
+ unsigned long base;
+ struct tmc_pages *tmc_pages;
+
+ if (table) {
+ tmc_pages = &sg_table->table_pages;
+ base = (unsigned long)sg_table->table_vaddr;
+ } else {
+ tmc_pages = &sg_table->data_pages;
+ base = (unsigned long)sg_table->data_vaddr;
+ }
+
+ offset = tmc_pages_get_offset(tmc_pages, addr);
+ if (offset < 0)
+ return 0;
+ return base + offset;
+}
+
+/* Dump the given sg_table */
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
+{
+ sgte_t *ptr;
+ int i = 0;
+ dma_addr_t addr;
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ etr_table->hwaddr, true);
+ while (ptr) {
+ addr = ETR_SG_ADDR(*ptr);
+ switch (ETR_SG_ET(*ptr)) {
+ case ETR_SG_ET_NORMAL:
+ dev_dbg(sg_table->dev,
+ "%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
+ ptr++;
+ break;
+ case ETR_SG_ET_LINK:
+ dev_dbg(sg_table->dev,
+ "%05d: *** %p\t:{L} 0x%llx ***\n",
+ i, ptr, addr);
+ ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
+ addr, true);
+ break;
+ case ETR_SG_ET_LAST:
+ dev_dbg(sg_table->dev,
+ "%05d: ### %p\t:[L] 0x%llx ###\n",
+ i, ptr, addr);
+ return;
+ default:
+ dev_dbg(sg_table->dev,
+ "%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
+ i, ptr, addr);
+ return;
+ }
+ i++;
+ }
+ dev_dbg(sg_table->dev, "******* End of Table *****\n");
+}
+#else
+static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+#endif
+
+/*
+ * Populate the SG table entries from the allocated table/data pages.
+ * Each system page holds ETR_SG_PAGES_PER_SYSPAGE SG pages, for the data
+ * buffer as well as for the table itself, so we keep track of the indices
+ * within each system page and move the pointers accordingly.
+ */
+#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
+static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
+{
+ dma_addr_t paddr;
+ int i, type, nr_entries;
+ int tpidx = 0; /* index to the current system table_page */
+ int sgtidx = 0; /* index to the sg_table within the current syspage */
+ int sgtentry = 0; /* the entry within the sg_table */
+ int dpidx = 0; /* index to the current system data_page */
+ int spidx = 0; /* index to the SG page within the current data page */
+ sgte_t *ptr; /* pointer to the table entry to fill */
+ struct tmc_sg_table *sg_table = etr_table->sg_table;
+ dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
+ dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;
+
+ nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
+ /*
+ * Use the contiguous virtual address of the table to update entries.
+ */
+ ptr = sg_table->table_vaddr;
+ /*
+ * Fill all the entries, except the last entry to avoid special
+ * checks within the loop.
+ */
+ for (i = 0; i < nr_entries - 1; i++) {
+ if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
+ /*
+ * Last entry in a sg_table page is a link address to
+ * the next table page. If this sg_table is the last
+ * one in the system page, it links to the first
+ * sg_table in the next system page. Otherwise, it
+ * links to the next sg_table page within the system
+ * page.
+ */
+ if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
+ paddr = table_daddrs[tpidx + 1];
+ } else {
+ paddr = table_daddrs[tpidx] +
+ (ETR_SG_PAGE_SIZE * (sgtidx + 1));
+ }
+ type = ETR_SG_ET_LINK;
+ } else {
+ /*
+ * Update the indices to the data_pages to point to the
+ * next sg_page in the data buffer.
+ */
+ type = ETR_SG_ET_NORMAL;
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
+ dpidx++;
+ }
+ *ptr++ = ETR_SG_ENTRY(paddr, type);
+ /*
+ * Move to the next table pointer, moving the table page index
+ * if necessary
+ */
+ if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
+ if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
+ tpidx++;
+ }
+ }
+
+ /* Set up the last entry, which is always a data pointer */
+ paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
+ *ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
+}
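
INC_IDX_ROUND() advances an index modulo @size and evaluates to the new
value, so a result of zero signals that the index wrapped and the enclosing
unit (the system page) must move forward. A small illustration of that
contract (hypothetical helper, not part of this patch):

static int count_syspage_crossings(int nr_sg_pages)
{
	int i, idx = 0, crossings = 0;

	for (i = 0; i < nr_sg_pages; i++) {
		/* A zero result means idx wrapped back to 0, i.e. the
		 * next SG page starts a new system page.
		 */
		if (!INC_IDX_ROUND(idx, ETR_SG_PAGES_PER_SYSPAGE))
			crossings++;
	}
	return crossings;
}
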
+
+/*
+ * tmc_init_etr_sg_table: Allocate a TMC ETR SG table and a data buffer of
+ * @size, then populate the table.
+ *
+ * @dev - Device pointer for the TMC
+ * @node - NUMA node where the memory should be allocated
+ * @size - Total size of the data buffer
+ * @pages - Optional list of page virtual addresses
+ */
+static struct etr_sg_table *
+tmc_init_etr_sg_table(struct device *dev, int node,
+ unsigned long size, void **pages)
+{
+ int nr_entries, nr_tpages;
+ int nr_dpages = size >> PAGE_SHIFT;
+ struct tmc_sg_table *sg_table;
+ struct etr_sg_table *etr_table;
+
+ etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
+ if (!etr_table)
+ return ERR_PTR(-ENOMEM);
+ nr_entries = tmc_etr_sg_table_entries(nr_dpages);
+ nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);
+
+ sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
+ if (IS_ERR(sg_table)) {
+ kfree(etr_table);
+		return ERR_CAST(sg_table);
+ }
+
+ etr_table->sg_table = sg_table;
+ /* TMC should use table base address for DBA */
+ etr_table->hwaddr = sg_table->table_daddr;
+ tmc_etr_sg_table_populate(etr_table);
+ /* Sync the table pages for the HW */
+ tmc_sg_table_sync_table(sg_table);
+ tmc_etr_sg_table_dump(etr_table);
+
+ return etr_table;
+}
+
+/*
+ * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
+ */
+static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_flat_buf *flat_buf;
+
+ /* We cannot reuse existing pages for flat buf */
+ if (pages)
+ return -EINVAL;
+
+ flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
+ if (!flat_buf)
+ return -ENOMEM;
+
+ flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
+ &flat_buf->daddr, GFP_KERNEL);
+ if (!flat_buf->vaddr) {
+ kfree(flat_buf);
+ return -ENOMEM;
+ }
+
+ flat_buf->size = etr_buf->size;
+ flat_buf->dev = drvdata->dev;
+ etr_buf->hwaddr = flat_buf->daddr;
+ etr_buf->mode = ETR_MODE_FLAT;
+ etr_buf->private = flat_buf;
+ return 0;
+}
+
+static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ if (flat_buf && flat_buf->daddr)
+ dma_free_coherent(flat_buf->dev, flat_buf->size,
+ flat_buf->vaddr, flat_buf->daddr);
+ kfree(flat_buf);
+}
+
+static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ /*
+ * Adjust the buffer to point to the beginning of the trace data
+ * and update the available trace data.
+ */
+ etr_buf->offset = rrp - etr_buf->hwaddr;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = rwp - rrp;
+}
+
+static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ struct etr_flat_buf *flat_buf = etr_buf->private;
+
+ *bufpp = (char *)flat_buf->vaddr + offset;
+ /*
+ * tmc_etr_buf_get_data already adjusts the length to handle
+ * buffer wrapping around.
+ */
+ return len;
+}
+
+static const struct etr_buf_operations etr_flat_buf_ops = {
+ .alloc = tmc_etr_alloc_flat_buf,
+ .free = tmc_etr_free_flat_buf,
+ .sync = tmc_etr_sync_flat_buf,
+ .get_data = tmc_etr_get_data_flat_buf,
+};
+
+/*
+ * tmc_etr_alloc_sg_buf: Allocate an SG buffer for @etr_buf and set up
+ * the parameters appropriately.
+ */
+static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ struct etr_sg_table *etr_table;
+
+ etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
+ etr_buf->size, pages);
+ if (IS_ERR(etr_table))
+		return PTR_ERR(etr_table);
+ etr_buf->hwaddr = etr_table->hwaddr;
+ etr_buf->mode = ETR_MODE_ETR_SG;
+ etr_buf->private = etr_table;
+ return 0;
+}
+
+static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ if (etr_table) {
+ tmc_free_sg_table(etr_table->sg_table);
+ kfree(etr_table);
+ }
+}
+
+static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
+ size_t len, char **bufpp)
+{
+ struct etr_sg_table *etr_table = etr_buf->private;
+
+ return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
+}
+
+static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
+{
+ long r_offset, w_offset;
+ struct etr_sg_table *etr_table = etr_buf->private;
+ struct tmc_sg_table *table = etr_table->sg_table;
+
+ /* Convert hw address to offset in the buffer */
+ r_offset = tmc_sg_get_data_page_offset(table, rrp);
+ if (r_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RRP %llx to offset\n", rrp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ w_offset = tmc_sg_get_data_page_offset(table, rwp);
+ if (w_offset < 0) {
+ dev_warn(table->dev,
+ "Unable to map RWP %llx to offset\n", rwp);
+ etr_buf->len = 0;
+ return;
+ }
+
+ etr_buf->offset = r_offset;
+ if (etr_buf->full)
+ etr_buf->len = etr_buf->size;
+ else
+ etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
+ w_offset - r_offset;
+ tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
+}
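
The length computation folds in one extra buffer size when the write pointer
has wrapped behind the read pointer. With invented values for illustration:
size = 4096, r_offset = 3072 and w_offset = 1024 (buffer not full) give
len = 4096 + 1024 - 3072 = 2048, i.e. 1024 bytes from offset 3072 to the end
of the buffer plus 1024 bytes wrapped to the start.
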
+
+static const struct etr_buf_operations etr_sg_buf_ops = {
+ .alloc = tmc_etr_alloc_sg_buf,
+ .free = tmc_etr_free_sg_buf,
+ .sync = tmc_etr_sync_sg_buf,
+ .get_data = tmc_etr_get_data_sg_buf,
+};
+
+/*
+ * TMC ETR could be connected to a CATU device, which can provide an address
+ * translation service. This is represented by the output port of the TMC
+ * (ETR) connected to the input port of the CATU.
+ *
+ * Returns : coresight_device ptr for the CATU device if a CATU is found.
+ * : NULL otherwise.
+ */
+struct coresight_device *
+tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
+{
+ int i;
+ struct coresight_device *tmp, *etr = drvdata->csdev;
+
+ if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
+ return NULL;
+
+ for (i = 0; i < etr->nr_outport; i++) {
+ tmp = etr->conns[i].child_dev;
+ if (tmp && coresight_is_catu_device(tmp))
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->enable)
+ helper_ops(catu)->enable(catu, drvdata->etr_buf);
+}
+
+static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
+{
+ struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
+
+ if (catu && helper_ops(catu)->disable)
+ helper_ops(catu)->disable(catu, drvdata->etr_buf);
+}
+
+static const struct etr_buf_operations *etr_buf_ops[] = {
+ [ETR_MODE_FLAT] = &etr_flat_buf_ops,
+ [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
+ [ETR_MODE_CATU] = &etr_catu_buf_ops,
+};
+
+static inline int tmc_etr_mode_alloc_buf(int mode,
+ struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf, int node,
+ void **pages)
+{
+ int rc = -EINVAL;
+
+ switch (mode) {
+ case ETR_MODE_FLAT:
+ case ETR_MODE_ETR_SG:
+ case ETR_MODE_CATU:
+ if (etr_buf_ops[mode]->alloc)
+ rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
+ node, pages);
+ if (!rc)
+ etr_buf->ops = etr_buf_ops[mode];
+ return rc;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * tmc_alloc_etr_buf: Allocate a buffer for use by the ETR.
+ * @drvdata : ETR device details.
+ * @size : size of the requested buffer.
+ * @flags : Required properties for the buffer.
+ * @node : Node for memory allocations.
+ * @pages : An optional list of pages.
+ */
+static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
+ ssize_t size, int flags,
+ int node, void **pages)
+{
+ int rc = -ENOMEM;
+ bool has_etr_sg, has_iommu;
+ bool has_sg, has_catu;
+ struct etr_buf *etr_buf;
+
+ has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
+ has_iommu = iommu_get_domain_for_dev(drvdata->dev);
+ has_catu = !!tmc_etr_get_catu_device(drvdata);
+
+ has_sg = has_catu || has_etr_sg;
+
+ etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
+ if (!etr_buf)
+ return ERR_PTR(-ENOMEM);
+
+ etr_buf->size = size;
+
+ /*
+	 * If we have to use an existing list of pages, we cannot reliably
+	 * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
+	 * we use contiguous DMA memory if at least one of the following
+	 * conditions is true:
+	 * a) The ETR cannot use Scatter-Gather.
+	 * b) We have a backing IOMMU.
+	 * c) The requested memory size is small (< 1M).
+	 *
+	 * Failing that, fall back to the remaining mechanisms in order.
+	 */
+ if (!pages &&
+ (!has_sg || has_iommu || size < SZ_1M))
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_etr_sg)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
+ etr_buf, node, pages);
+ if (rc && has_catu)
+ rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
+ etr_buf, node, pages);
+ if (rc) {
+ kfree(etr_buf);
+ return ERR_PTR(rc);
+ }
+
+ dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
+ (unsigned long)size >> 10, etr_buf->mode);
+ return etr_buf;
+}
+
+static void tmc_free_etr_buf(struct etr_buf *etr_buf)
+{
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
+ etr_buf->ops->free(etr_buf);
+ kfree(etr_buf);
+}
+
+/*
+ * tmc_etr_buf_get_data: Get the pointer to the trace data at @offset,
+ * with a maximum of @len bytes.
+ * Returns: The size of the linear data available at @offset, with *bufpp
+ * updated to point to the buffer.
+ */
+static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
+ u64 offset, size_t len, char **bufpp)
+{
+ /* Adjust the length to limit this transaction to end of buffer */
+ len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;
+
+ return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
+}
+
+static inline s64
+tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
+{
+ ssize_t len;
+ char *bufp;
+
+ len = tmc_etr_buf_get_data(etr_buf, offset,
+ CORESIGHT_BARRIER_PKT_SIZE, &bufp);
+ if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+ return -EINVAL;
+ coresight_insert_barrier_packet(bufp);
+ return offset + CORESIGHT_BARRIER_PKT_SIZE;
+}
+
+/*
+ * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
+ * Makes sure the trace data is synced to memory for consumption.
+ * @etr_buf->offset will hold the offset to the beginning of the trace data
+ * within the buffer, with @etr_buf->len bytes to consume.
+ */
+static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
+{
+ struct etr_buf *etr_buf = drvdata->etr_buf;
+ u64 rrp, rwp;
+ u32 status;
+
+ rrp = tmc_read_rrp(drvdata);
+ rwp = tmc_read_rwp(drvdata);
+ status = readl_relaxed(drvdata->base + TMC_STS);
+ etr_buf->full = status & TMC_STS_FULL;
+
+ WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
+
+ etr_buf->ops->sync(etr_buf, rrp, rwp);
+
+ /* Insert barrier packets at the beginning, if there was an overflow */
+ if (etr_buf->full)
+ tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+}
+
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- /* Zero out the memory to help with debug */
- memset(drvdata->vaddr, 0, drvdata->size);
+ /*
+ * If this ETR is connected to a CATU, enable it before we turn
+ * this on
+ */
+ tmc_etr_enable_catu(drvdata);
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
tmc_wait_for_tmcready(drvdata);
- writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
+ writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
@@ -34,16 +924,22 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl |= TMC_AXICTL_ARCACHE_OS;
}
+ if (etr_buf->mode == ETR_MODE_ETR_SG) {
+ if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
+ return;
+ axictl |= TMC_AXICTL_SCT_GAT_MODE;
+ }
+
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- tmc_write_dba(drvdata, drvdata->paddr);
+ tmc_write_dba(drvdata, etr_buf->hwaddr);
/*
* If the TMC pointers must be programmed before the session,
* we have to set it properly (i.e, RRP/RWP to base address and
* STS to "not full").
*/
if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
- tmc_write_rrp(drvdata, drvdata->paddr);
- tmc_write_rwp(drvdata, drvdata->paddr);
+ tmc_write_rrp(drvdata, etr_buf->hwaddr);
+ tmc_write_rwp(drvdata, etr_buf->hwaddr);
sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
writel_relaxed(sts, drvdata->base + TMC_STS);
}
@@ -58,37 +954,49 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+/*
+ * Return the available trace data in the buffer (which starts at
+ * etr_buf->offset and is limited by etr_buf->len) from @pos, with a maximum
+ * limit of @len, also updating @bufpp with where to find it. Since the
+ * trace data can start anywhere in the buffer, depending on the RRP, we
+ * adjust the returned @len to handle the buffer wrapping around.
+ */
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
{
- const u32 *barrier;
- u32 val;
- u32 *temp;
- u64 rwp;
+ s64 offset;
+ ssize_t actual = len;
+ struct etr_buf *etr_buf = drvdata->etr_buf;
- rwp = tmc_read_rwp(drvdata);
- val = readl_relaxed(drvdata->base + TMC_STS);
+ if (pos + actual > etr_buf->len)
+ actual = etr_buf->len - pos;
+ if (actual <= 0)
+ return actual;
- /*
- * Adjust the buffer to point to the beginning of the trace data
- * and update the available trace data.
- */
- if (val & TMC_STS_FULL) {
- drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
- drvdata->len = drvdata->size;
+ /* Compute the offset from which we read the data */
+ offset = etr_buf->offset + pos;
+ if (offset >= etr_buf->size)
+ offset -= etr_buf->size;
+ return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
+}
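
Since etr_buf->offset + pos can exceed the buffer size once the trace has
wrapped, the computed offset is folded back at most once. For illustration
(invented values): size = 8192, etr_buf->offset = 6144 and pos = 3072 give
offset = 9216, which folds back to 1024.
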
- barrier = barrier_pkt;
- temp = (u32 *)drvdata->buf;
+static struct etr_buf *
+tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ return tmc_alloc_etr_buf(drvdata, drvdata->size,
+ 0, cpu_to_node(0), NULL);
+}
- while (*barrier) {
- *temp = *barrier;
- temp++;
- barrier++;
- }
+static void
+tmc_etr_free_sysfs_buf(struct etr_buf *buf)
+{
+ if (buf)
+ tmc_free_etr_buf(buf);
+}
- } else {
- drvdata->buf = drvdata->vaddr;
- drvdata->len = rwp - drvdata->paddr;
- }
+static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
+{
+ tmc_sync_etr_buf(drvdata);
}
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
@@ -101,44 +1009,45 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* read before the TMC is disabled.
*/
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etr_dump_hw(drvdata);
+ tmc_etr_sync_sysfs_buf(drvdata);
+
tmc_disable_hw(drvdata);
CS_LOCK(drvdata->base);
+
+ /* Disable CATU device if this ETR is connected to one */
+ tmc_etr_disable_catu(drvdata);
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
int ret = 0;
- bool used = false;
unsigned long flags;
- void __iomem *vaddr = NULL;
- dma_addr_t paddr = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etr_buf *new_buf = NULL, *free_buf = NULL;
/*
- * If we don't have a buffer release the lock and allocate memory.
- * Otherwise keep the lock and move along.
+ * If we are enabling the ETR from disabled state, we need to make
+ * sure we have a buffer with the right size. The etr_buf is not reset
+ * immediately after we stop the tracing in SYSFS mode as we wait for
+ * the user to collect the data. We may be able to reuse the existing
+ * buffer, provided the size matches. Any allocation has to be done
+ * with the lock released.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->vaddr) {
+ if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- /*
- * Contiguous memory can't be allocated while a spinlock is
- * held. As such allocate memory here and free it if a buffer
- * has already been allocated (from a previous session).
- */
- vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
- &paddr, GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
+ /* Allocate memory with the locks released */
+ free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
+ if (IS_ERR(new_buf))
+ return PTR_ERR(new_buf);
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
}
- if (drvdata->reading) {
+ if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
@@ -146,21 +1055,19 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
- * touched.
+ * touched, even if the buffer size has changed.
*/
if (drvdata->mode == CS_MODE_SYSFS)
goto out;
/*
- * If drvdata::vaddr == NULL, use the memory allocated above.
- * Otherwise a buffer still exists from a previous session, so
- * simply use that.
+ * If we don't have a buffer or it doesn't match the requested size,
+ * use the buffer allocated above. Otherwise reuse the existing buffer.
*/
- if (drvdata->vaddr == NULL) {
- used = true;
- drvdata->vaddr = vaddr;
- drvdata->paddr = paddr;
- drvdata->buf = drvdata->vaddr;
+ if (!drvdata->etr_buf ||
+ (new_buf && drvdata->etr_buf->size != new_buf->size)) {
+ free_buf = drvdata->etr_buf;
+ drvdata->etr_buf = new_buf;
}
drvdata->mode = CS_MODE_SYSFS;
@@ -169,8 +1076,8 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
- if (!used && vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (free_buf)
+ tmc_etr_free_sysfs_buf(free_buf);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -180,32 +1087,8 @@ out:
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
- int ret = 0;
- unsigned long flags;
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETR is already operated
- * from sysFS.
- */
- if (drvdata->mode != CS_MODE_DISABLED) {
- ret = -EINVAL;
- goto out;
- }
-
- drvdata->mode = CS_MODE_PERF;
- tmc_etr_enable_hw(drvdata);
-out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- return ret;
+	/* We don't support perf mode yet! */
+ return -EINVAL;
}
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
@@ -273,8 +1156,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- /* If drvdata::buf is NULL the trace data has been read already */
- if (drvdata->buf == NULL) {
+ /* If drvdata::etr_buf is NULL the trace data has been read already */
+ if (drvdata->etr_buf == NULL) {
ret = -EINVAL;
goto out;
}
@@ -293,8 +1176,7 @@ out:
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
unsigned long flags;
- dma_addr_t paddr;
- void __iomem *vaddr = NULL;
+ struct etr_buf *etr_buf = NULL;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -306,9 +1188,8 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (drvdata->mode == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
- * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
- * so we don't have to explicitly clear it. Also, since the
- * tracer is still enabled drvdata::buf can't be NULL.
+		 * buffer. Since the tracer is still enabled drvdata::etr_buf
+		 * can't be NULL.
*/
tmc_etr_enable_hw(drvdata);
} else {
@@ -316,17 +1197,16 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
* The ETR is not tracing and the buffer was just read.
* As such prepare to free the trace buffer.
*/
- vaddr = drvdata->vaddr;
- paddr = drvdata->paddr;
- drvdata->buf = drvdata->vaddr = NULL;
+ etr_buf = drvdata->etr_buf;
+ drvdata->etr_buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
	/* Free allocated memory outside of the spinlock */
- if (vaddr)
- dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+ if (etr_buf)
+ tmc_free_etr_buf(etr_buf);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 456f122df74f..1b817ec1192c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
@@ -123,35 +124,40 @@ static int tmc_open(struct inode *inode, struct file *file)
return 0;
}
+static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp)
+{
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
+ case TMC_CONFIG_TYPE_ETF:
+ return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
+ case TMC_CONFIG_TYPE_ETR:
+ return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
+ }
+
+ return -EINVAL;
+}
+
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
loff_t *ppos)
{
+ char *bufp;
+ ssize_t actual;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
+ if (actual <= 0)
+ return 0;
- if (*ppos + len > drvdata->len)
- len = drvdata->len - *ppos;
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (bufp == (char *)(drvdata->vaddr + drvdata->size))
- bufp = drvdata->vaddr;
- else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
- bufp -= drvdata->size;
- if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
- len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
- }
-
- if (copy_to_user(data, bufp, len)) {
+ if (copy_to_user(data, bufp, actual)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
- *ppos += len;
+ *ppos += actual;
+ dev_dbg(drvdata->dev, "%zu bytes copied\n", actual);
- dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
- __func__, len, (int)(drvdata->len - *ppos));
- return len;
+ return actual;
}
static int tmc_release(struct inode *inode, struct file *file)
@@ -271,8 +277,41 @@ static ssize_t trigger_cntr_store(struct device *dev,
}
static DEVICE_ATTR_RW(trigger_cntr);
+static ssize_t buffer_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sprintf(buf, "%#x\n", drvdata->size);
+}
+
+static ssize_t buffer_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ /* Only permitted for TMC-ETRs */
+ if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
+ return -EPERM;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+ /* The buffer size should be page aligned */
+ if (val & (PAGE_SIZE - 1))
+ return -EINVAL;
+ drvdata->size = val;
+ return size;
+}
+
+static DEVICE_ATTR_RW(buffer_size);
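
As a usage sketch (not part of this patch), a user-space program could
resize the ETR buffer through the new attribute before starting a capture.
The sysfs path below is a hypothetical example; the actual device name is
platform specific:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device path; the real name depends on the SoC */
	int fd = open("/sys/bus/coresight/devices/tmc_etr0/buffer_size",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The value must be page aligned and is only accepted on TMC-ETRs */
	if (write(fd, "0x100000", 8) != 8)
		perror("write");
	close(fd);
	return 0;
}
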
+
static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
+ &dev_attr_buffer_size.attr,
NULL,
};
@@ -291,6 +330,12 @@ const struct attribute_group *coresight_tmc_groups[] = {
NULL,
};
+static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
+{
+ return fwnode_property_present(drvdata->dev->fwnode,
+ "arm,scatter-gather");
+}
+
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
u32 devid, void *dev_caps)
@@ -300,7 +345,7 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
/* Set the unadvertised capabilities */
tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
- if (!(devid & TMC_DEVID_NOSCAT))
+ if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(drvdata))
tmc_etr_set_cap(drvdata, TMC_ETR_SG);
/* Check if the AXI address width is available */
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index dfaff077a7fc..7027bd60c4cc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -7,6 +7,7 @@
#ifndef _CORESIGHT_TMC_H
#define _CORESIGHT_TMC_H
+#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#define TMC_RSZ 0x004
@@ -122,6 +123,36 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+enum etr_mode {
+	ETR_MODE_FLAT,		/* Uses a contiguous flat buffer */
+	ETR_MODE_ETR_SG,	/* Uses the in-built TMC ETR SG mechanism */
+	ETR_MODE_CATU,		/* Uses the SG mechanism in the CATU */
+};
+
+struct etr_buf_operations;
+
+/**
+ * struct etr_buf - Details of the buffer used by ETR
+ * @mode	: Mode of the ETR buffer: contiguous, scatter-gather, etc.
+ * @full	: Trace data overflow
+ * @size	: Size of the buffer.
+ * @hwaddr	: Address to be programmed in the TMC:DBA{LO,HI}
+ * @offset	: Offset of the trace data in the buffer for consumption.
+ * @len	: Available trace data in the buffer (may wrap around to
+ *		  the beginning).
+ * @ops : ETR buffer operations for the mode.
+ * @private : Backend specific information for the buf
+ */
+struct etr_buf {
+ enum etr_mode mode;
+ bool full;
+ ssize_t size;
+ dma_addr_t hwaddr;
+ unsigned long offset;
+ s64 len;
+ const struct etr_buf_operations *ops;
+ void *private;
+};
+
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
@@ -129,11 +160,10 @@ enum tmc_mem_intf_width {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
- * @buf: area of memory where trace data get sent.
- * @paddr: DMA start location in RAM.
- * @vaddr: virtual representation of @paddr.
- * @size: trace buffer size.
- * @len: size of the available trace.
+ * @buf: Snapshot of the trace data for ETF/ETB.
+ * @etr_buf: details of buffer used in TMC-ETR
+ * @len: size of the available trace for ETF/ETB.
+ * @size: trace buffer size for this TMC (common for all modes).
* @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
@@ -148,11 +178,12 @@ struct tmc_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
bool reading;
- char *buf;
- dma_addr_t paddr;
- void __iomem *vaddr;
- u32 size;
+ union {
+ char *buf; /* TMC ETB */
+ struct etr_buf *etr_buf; /* TMC ETR */
+ };
u32 len;
+ u32 size;
u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
@@ -160,6 +191,47 @@ struct tmc_drvdata {
u32 etr_caps;
};
+struct etr_buf_operations {
+ int (*alloc)(struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
+ int node, void **pages);
+ void (*sync)(struct etr_buf *etr_buf, u64 rrp, u64 rwp);
+ ssize_t (*get_data)(struct etr_buf *etr_buf, u64 offset, size_t len,
+ char **bufpp);
+ void (*free)(struct etr_buf *etr_buf);
+};
+
+/**
+ * struct tmc_pages - Collection of pages used for SG.
+ * @nr_pages: Number of pages in the list.
+ * @daddrs:	Array of DMA'able page addresses.
+ * @pages:	Array of pages for the buffer.
+ */
+struct tmc_pages {
+ int nr_pages;
+ dma_addr_t *daddrs;
+ struct page **pages;
+};
+
+/*
+ * struct tmc_sg_table - Generic SG table for TMC
+ * @dev: Device for DMA allocations
+ * @table_vaddr: Contiguous Virtual address for PageTable
+ * @data_vaddr: Contiguous Virtual address for Data Buffer
+ * @table_daddr: DMA address of the PageTable base
+ * @node: Node for Page allocations
+ * @table_pages: List of pages & dma address for Table
+ * @data_pages: List of pages & dma address for Data
+ */
+struct tmc_sg_table {
+ struct device *dev;
+ void *table_vaddr;
+ void *data_vaddr;
+ dma_addr_t table_daddr;
+ int node;
+ struct tmc_pages table_pages;
+ struct tmc_pages data_pages;
+};
+
/* Generic functions */
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
@@ -172,10 +244,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etb_cs_ops;
extern const struct coresight_ops tmc_etf_cs_ops;
+ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
+ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
+ loff_t pos, size_t len, char **bufpp);
#define TMC_REG_PAIR(name, lo_off, hi_off) \
@@ -211,4 +287,23 @@ static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
return !!(drvdata->etr_caps & cap);
}
+struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
+ int node,
+ int nr_tpages,
+ int nr_dpages,
+ void **pages);
+void tmc_free_sg_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table);
+void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
+ u64 offset, u64 size);
+ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
+ u64 offset, size_t len, char **bufpp);
+static inline unsigned long
+tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
+{
+ return sg_table->data_pages.nr_pages << PAGE_SHIFT;
+}
+
+struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 01b7457fe8fc..459ef930d98c 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -40,8 +40,9 @@
/** register definition **/
/* FFSR - 0x300 */
-#define FFSR_FT_STOPPED BIT(1)
+#define FFSR_FT_STOPPED_BIT 1
/* FFCR - 0x304 */
+#define FFCR_FON_MAN_BIT 6
#define FFCR_FON_MAN BIT(6)
#define FFCR_STOP_FI BIT(12)
@@ -86,9 +87,9 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
/* Generate manual flush */
writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
CS_LOCK(drvdata->base);
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 29e834aab539..3e07fd335f8c 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -51,8 +51,7 @@ static struct list_head *stm_path;
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
*/
-const u32 barrier_pkt[5] = {0x7fffffff, 0x7fffffff,
- 0x7fffffff, 0x7fffffff, 0x0};
+const u32 barrier_pkt[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
static int coresight_id_match(struct device *dev, void *data)
{
@@ -108,7 +107,7 @@ static int coresight_find_link_inport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
dev_name(&parent->dev), dev_name(&csdev->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_find_link_outport(struct coresight_device *csdev,
@@ -126,7 +125,7 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
dev_name(&csdev->dev), dev_name(&child->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
@@ -179,6 +178,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
else
refport = 0;
+ if (refport < 0)
+ return refport;
+
if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
if (link_ops(csdev)->enable) {
ret = link_ops(csdev)->enable(csdev, inport, outport);
@@ -423,6 +425,42 @@ struct coresight_device *coresight_get_enabled_sink(bool deactivate)
return dev ? to_coresight_device(dev) : NULL;
}
+/*
+ * coresight_grab_device - Power up this device and any of the helper
+ * devices connected to it for trace operation. Since the helper devices
+ * don't appear on the trace path, they should be handled along with
+ * the master device.
+ */
+static void coresight_grab_device(struct coresight_device *csdev)
+{
+ int i;
+
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_get_sync(child->dev.parent);
+ }
+ pm_runtime_get_sync(csdev->dev.parent);
+}
+
+/*
+ * coresight_drop_device - Release this device and any of the helper
+ * devices connected to it.
+ */
+static void coresight_drop_device(struct coresight_device *csdev)
+{
+ int i;
+
+ pm_runtime_put(csdev->dev.parent);
+ for (i = 0; i < csdev->nr_outport; i++) {
+ struct coresight_device *child = csdev->conns[i].child_dev;
+
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ pm_runtime_put(child->dev.parent);
+ }
+}
+
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
@@ -471,9 +509,9 @@ out:
if (!node)
return -ENOMEM;
+ coresight_grab_device(csdev);
node->csdev = csdev;
list_add(&node->link, path);
- pm_runtime_get_sync(csdev->dev.parent);
return 0;
}
@@ -517,7 +555,7 @@ void coresight_release_path(struct list_head *path)
list_for_each_entry_safe(nd, next, path, link) {
csdev = nd->csdev;
- pm_runtime_put_sync(csdev->dev.parent);
+ coresight_drop_device(csdev);
list_del(&nd->link);
kfree(nd);
}
@@ -768,6 +806,9 @@ static struct device_type coresight_dev_type[] = {
.name = "source",
.groups = coresight_source_groups,
},
+ {
+ .name = "helper",
+ },
};
static void coresight_device_release(struct device *dev)
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 25151d9214e0..47a0e81a2989 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -424,6 +424,9 @@ static struct hv_driver hv_kbd_drv = {
.id_table = id_table,
.probe = hv_kbd_probe,
.remove = hv_kbd_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int __init hv_kbd_init(void)
diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
index a16b320739b4..8a9c169b6f99 100644
--- a/drivers/ipack/carriers/tpci200.c
+++ b/drivers/ipack/carriers/tpci200.c
@@ -304,6 +304,13 @@ static int tpci200_register(struct tpci200_board *tpci200)
ioremap_nocache(pci_resource_start(tpci200->info->pdev,
TPCI200_IP_INTERFACE_BAR),
TPCI200_IFACE_SIZE);
+ if (!tpci200->info->interface_regs) {
+ dev_err(&tpci200->info->pdev->dev,
+ "(bn 0x%X, sn 0x%X) failed to map driver user space!",
+ tpci200->info->pdev->bus->number,
+ tpci200->info->pdev->devfn);
+ goto out_release_mem8_space;
+ }
/* Initialize lock that protects interface_regs */
spin_lock_init(&tpci200->regs_lock);
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 85a66e4e2f9a..96ab4b61669a 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index b47af8eb145a..43c78620c9d8 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 88a1e5670c72..ec68feaac378 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -17,6 +17,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 6e10b63ba9ec..77887f66f323 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -17,6 +17,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index e583ec7a91da..b0952ee86296 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -17,6 +17,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 605e2a2d5dd5..b2b89315e7ba 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -19,6 +19,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/vmalloc.h>
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 7254fb596979..ffda903c49bb 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <dt-bindings/memory/tegra186-mc.h>
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index 4b15b0840f16..e82543bcfdc8 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -22,6 +22,7 @@
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 306e1fd109bd..27af62ed480a 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -20,6 +20,7 @@
#include <linux/fs.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c
index cb78c98bc78d..2feb4347d67f 100644
--- a/drivers/misc/aspeed-lpc-snoop.c
+++ b/drivers/misc/aspeed-lpc-snoop.c
@@ -16,12 +16,15 @@
#include <linux/bitops.h>
#include <linux/interrupt.h>
+#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/poll.h>
#include <linux/regmap.h>
#define DEVICE_NAME "aspeed-lpc-snoop"
@@ -59,20 +62,70 @@ struct aspeed_lpc_snoop_model_data {
unsigned int has_hicrb_ensnp;
};
+struct aspeed_lpc_snoop_channel {
+ struct kfifo fifo;
+ wait_queue_head_t wq;
+ struct miscdevice miscdev;
+};
+
struct aspeed_lpc_snoop {
struct regmap *regmap;
int irq;
- struct kfifo snoop_fifo[NUM_SNOOP_CHANNELS];
+ struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
+};
+
+static struct aspeed_lpc_snoop_channel *snoop_file_to_chan(struct file *file)
+{
+ return container_of(file->private_data,
+ struct aspeed_lpc_snoop_channel,
+ miscdev);
+}
+
+static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+ unsigned int copied;
+ int ret = 0;
+
+ if (kfifo_is_empty(&chan->fifo)) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_event_interruptible(chan->wq,
+ !kfifo_is_empty(&chan->fifo));
+ if (ret == -ERESTARTSYS)
+ return -EINTR;
+ }
+ ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
+
+ return ret ? ret : copied;
+}
+
+static unsigned int snoop_file_poll(struct file *file,
+ struct poll_table_struct *pt)
+{
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+
+ poll_wait(file, &chan->wq, pt);
+ return !kfifo_is_empty(&chan->fifo) ? POLLIN : 0;
+}
+
+static const struct file_operations snoop_fops = {
+ .owner = THIS_MODULE,
+ .read = snoop_file_read,
+ .poll = snoop_file_poll,
+ .llseek = noop_llseek,
};
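
With the per-channel misc device registered in aspeed_lpc_enable_snoop()
below (named after DEVICE_NAME, e.g. /dev/aspeed-lpc-snoop0), POST codes can
be consumed from user space. A hypothetical reader, shown only as a sketch:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char code;
	struct pollfd pfd;

	pfd.fd = open("/dev/aspeed-lpc-snoop0", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;
	/* Wait for a POST code byte, then print it */
	while (poll(&pfd, 1, -1) > 0) {
		if (read(pfd.fd, &code, 1) == 1)
			printf("POST code: 0x%02x\n", code);
	}
	return 0;
}
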
/* Save a byte to a FIFO and discard the oldest byte if FIFO is full */
-static void put_fifo_with_discard(struct kfifo *fifo, u8 val)
+static void put_fifo_with_discard(struct aspeed_lpc_snoop_channel *chan, u8 val)
{
- if (!kfifo_initialized(fifo))
+ if (!kfifo_initialized(&chan->fifo))
return;
- if (kfifo_is_full(fifo))
- kfifo_skip(fifo);
- kfifo_put(fifo, val);
+ if (kfifo_is_full(&chan->fifo))
+ kfifo_skip(&chan->fifo);
+ kfifo_put(&chan->fifo, val);
+ wake_up_interruptible(&chan->wq);
}
static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
@@ -97,12 +150,12 @@ static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
if (reg & HICR6_STR_SNP0W) {
u8 val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT;
- put_fifo_with_discard(&lpc_snoop->snoop_fifo[0], val);
+ put_fifo_with_discard(&lpc_snoop->chan[0], val);
}
if (reg & HICR6_STR_SNP1W) {
u8 val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT;
- put_fifo_with_discard(&lpc_snoop->snoop_fifo[1], val);
+ put_fifo_with_discard(&lpc_snoop->chan[1], val);
}
return IRQ_HANDLED;
@@ -139,12 +192,22 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
const struct aspeed_lpc_snoop_model_data *model_data =
of_device_get_match_data(dev);
+ init_waitqueue_head(&lpc_snoop->chan[channel].wq);
/* Create FIFO datastructure */
- rc = kfifo_alloc(&lpc_snoop->snoop_fifo[channel],
+ rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
SNOOP_FIFO_SIZE, GFP_KERNEL);
if (rc)
return rc;
+ lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
+ lpc_snoop->chan[channel].miscdev.name =
+ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
+ lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
+ lpc_snoop->chan[channel].miscdev.parent = dev;
+ rc = misc_register(&lpc_snoop->chan[channel].miscdev);
+ if (rc)
+ return rc;
+
/* Enable LPC snoop channel at requested port */
switch (channel) {
case 0:
@@ -191,7 +254,8 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
return;
}
- kfifo_free(&lpc_snoop->snoop_fifo[channel]);
+ kfifo_free(&lpc_snoop->chan[channel].fifo);
+ misc_deregister(&lpc_snoop->chan[channel].miscdev);
}
static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index e8f1d4bb806a..da445223f4cc 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -80,7 +80,7 @@ static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
0xFC, 0);
}
-int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
rtsx_pci_write_register(pcr, MSGTXDATA0,
MASK_8_BIT_DEF, (u8) (latency & 0xFF));
@@ -143,7 +143,7 @@ int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
return 0;
}
-void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
+static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
{
if (pcr->ops->set_l1off_cfg_sub_d0)
pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
@@ -162,7 +162,7 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}
-void rtsx_pm_full_on(struct rtsx_pcr *pcr)
+static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
if (pcr->ops->full_on)
pcr->ops->full_on(pcr);
@@ -967,13 +967,13 @@ static void rtsx_pci_card_detect(struct work_struct *work)
pcr->slots[RTSX_MS_CARD].p_dev);
}
-void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
if (pcr->ops->process_ocp)
pcr->ops->process_ocp(pcr);
}
-int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
{
if (pcr->option.ocp_en)
rtsx_pci_process_ocp(pcr);
@@ -1094,7 +1094,7 @@ static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
rtsx_enable_aspm(pcr);
}
-void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
+static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
if (pcr->ops->power_saving)
pcr->ops->power_saving(pcr);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 6a7d4a2ad514..840afb398f9e 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -94,8 +94,10 @@ static int at25_ee_read(void *priv, unsigned int offset,
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
+ /* fall through */
case 2:
*cp++ = offset >> 8;
+ /* fall through */
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
@@ -180,8 +182,10 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
switch (at25->addrlen) {
default: /* case 3 */
*cp++ = offset >> 16;
+ /* fall through */
case 2:
*cp++ = offset >> 8;
+ /* fall through */
case 1:
case 0: /* can't happen: for better codegen */
*cp++ = offset >> 0;
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 59dc24bb70ec..8a4659518c33 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -938,7 +938,7 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
{
struct idt_89hpesx_dev *pdev = filep->private_data;
char *colon_ch, *csraddr_str, *csrval_str;
- int ret, csraddr_len, csrval_len;
+ int ret, csraddr_len;
u32 csraddr, csrval;
char *buf;
@@ -974,12 +974,10 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
csraddr_str[csraddr_len] = '\0';
/* Register value must follow the colon */
csrval_str = colon_ch + 1;
- csrval_len = strnlen(csrval_str, count - csraddr_len);
} else /* if (str_colon == NULL) */ {
csraddr_str = (char *)buf; /* Just to shut warning up */
csraddr_len = strnlen(csraddr_str, count);
csrval_str = NULL;
- csrval_len = 0;
}
/* Convert CSR address to u32 value */
@@ -1130,7 +1128,7 @@ static void idt_get_fw_data(struct idt_89hpesx_dev *pdev)
device_for_each_child_node(dev, fwnode) {
ee_id = idt_ee_match_id(fwnode);
- if (IS_ERR_OR_NULL(ee_id)) {
+ if (!ee_id) {
dev_warn(dev, "Skip unsupported EEPROM device");
continue;
} else
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 0e32709d1022..fc0cf9a7402e 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -148,7 +148,8 @@ static int max6875_probe(struct i2c_client *client,
if (client->addr & 1)
return -ENODEV;
- if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
+ data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
/* A fake client is created on the odd address */
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 1c3967f10f55..120738d6e58b 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -497,7 +497,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);
static inline bool dma_mapping_used(struct dma_mapping *m)
{
if (!m)
- return 0;
+ return false;
return m->size != 0;
}
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index f921dd590271..c6b82f09b3ba 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -305,7 +305,6 @@ GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show);
static int genwqe_info_show(struct seq_file *s, void *unused)
{
struct genwqe_dev *cd = s->private;
- u16 val16, type;
u64 app_id, slu_id, bitstream = -1;
struct pci_dev *pci_dev = cd->pci_dev;
@@ -315,9 +314,6 @@ static int genwqe_info_show(struct seq_file *s, void *unused)
if (genwqe_is_privileged(cd))
bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM);
- val16 = (u16)(slu_id & 0x0fLLU);
- type = (u16)((slu_id >> 20) & 0xffLLU);
-
seq_printf(s, "%s driver version: %s\n"
" Device Name/Type: %s %s CardIdx: %d\n"
" SLU/APP Config : 0x%016llx/0x%016llx\n"
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 0dd6b5ef314a..f453ab82f0d7 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -304,14 +304,12 @@ static int genwqe_open(struct inode *inode, struct file *filp)
{
struct genwqe_dev *cd;
struct genwqe_file *cfile;
- struct pci_dev *pci_dev;
cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
if (cfile == NULL)
return -ENOMEM;
cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
- pci_dev = cd->pci_dev;
cfile->cd = cd;
cfile->filp = filp;
cfile->client = NULL;
@@ -864,7 +862,6 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
struct genwqe_dev *cd = cfile->cd;
struct genwqe_ddcb_cmd *cmd = &req->cmd;
struct dma_mapping *m;
- const char *type = "UNKNOWN";
for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
i++, asiv_offs += 0x08) {
@@ -933,11 +930,9 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
if (m != NULL) {
- type = "PINNING";
page_offs = (u_addr -
(u64)m->u_vaddr)/PAGE_SIZE;
} else {
- type = "MAPPING";
m = &req->dma_mappings[i];
genwqe_mapping_init(m,
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index fb83d1375638..8f82bb9d11e2 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -273,7 +273,7 @@ static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
dma_addr_t *dma_handle)
{
/* allocate memory */
- void *buffer = kzalloc(size, GFP_KERNEL);
+ void *buffer = kzalloc(size, GFP_ATOMIC);
if (!buffer) {
*dma_handle = 0;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 0208c4b027c5..a6f41f96f2a1 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2013, Intel Corporation.
+ * Copyright (c) 2003-2018, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -96,8 +96,22 @@ struct mkhi_fwcaps {
u8 data[0];
} __packed;
+struct mkhi_fw_ver_block {
+ u16 minor;
+ u8 major;
+ u8 platform;
+ u16 buildno;
+ u16 hotfix;
+} __packed;
+
+struct mkhi_fw_ver {
+ struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS];
+} __packed;
+
#define MKHI_FWCAPS_GROUP_ID 0x3
#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
+#define MKHI_GEN_GROUP_ID 0xFF
+#define MKHI_GEN_GET_FW_VERSION_CMD 0x2
struct mkhi_msg_hdr {
u8 group_id;
u8 command;
@@ -139,21 +153,81 @@ static int mei_osver(struct mei_cl_device *cldev)
return __mei_cl_send(cldev->cl, buf, size, mode);
}
+#define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
+ sizeof(struct mkhi_fw_ver))
+#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \
+ sizeof(struct mkhi_fw_ver_block) * (__num))
+#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
+static int mei_fwver(struct mei_cl_device *cldev)
+{
+ char buf[MKHI_FWVER_BUF_LEN];
+ struct mkhi_msg *req;
+ struct mkhi_fw_ver *fwver;
+ int bytes_recv, ret, i;
+
+ memset(buf, 0, sizeof(buf));
+
+ req = (struct mkhi_msg *)buf;
+ req->hdr.group_id = MKHI_GEN_GROUP_ID;
+ req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
+
+ ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr),
+ MEI_CL_IO_TX_BLOCKING);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
+ return ret;
+ }
+
+ ret = 0;
+ bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0,
+ MKHI_RCV_TIMEOUT);
+ if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
+ /*
+ * Should be at least one version block,
+ * error out if nothing found
+ */
+ dev_err(&cldev->dev, "Could not read FW version\n");
+ return -EIO;
+ }
+
+ fwver = (struct mkhi_fw_ver *)req->data;
+ memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver));
+ for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) {
+ if ((size_t)bytes_recv < MKHI_FWVER_LEN(i + 1))
+ break;
+ dev_dbg(&cldev->dev, "FW version%d %d:%d.%d.%d.%d\n",
+ i, fwver->ver[i].platform,
+ fwver->ver[i].major, fwver->ver[i].minor,
+ fwver->ver[i].hotfix, fwver->ver[i].buildno);
+
+ cldev->bus->fw_ver[i].platform = fwver->ver[i].platform;
+ cldev->bus->fw_ver[i].major = fwver->ver[i].major;
+ cldev->bus->fw_ver[i].minor = fwver->ver[i].minor;
+ cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix;
+ cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno;
+ }
+
+ return ret;
+}
+
static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
- if (!cldev->bus->hbm_f_os_supported)
- return;
-
ret = mei_cldev_enable(cldev);
if (ret)
return;
- ret = mei_osver(cldev);
+ ret = mei_fwver(cldev);
if (ret < 0)
- dev_err(&cldev->dev, "OS version command failed %d\n", ret);
+ dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+ if (cldev->bus->hbm_f_os_supported) {
+ ret = mei_osver(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "OS version command failed %d\n",
+ ret);
+ }
mei_cldev_disable(cldev);
}
@@ -266,8 +340,8 @@ static int mei_nfc_if_version(struct mei_cl *cl,
return -ENOMEM;
ret = 0;
- bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0);
- if (bytes_recv < if_version_length) {
+ bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0);
+ if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
goto err;
@@ -410,7 +484,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *cldev)
{
struct mei_fixup *f;
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
- int i;
+ size_t i;
for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) {
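A minimal userspace sketch of the bounds check used by mei_fwver() above, with assumed sizes (the real values come from the mkhi structs); each version block is only consumed after verifying the received byte count covers it:

    #include <stddef.h>

    #define HDR_SZ      4            /* assumed sizeof(struct mkhi_msg_hdr) */
    #define BLOCK_SZ    8            /* assumed sizeof(struct mkhi_fw_ver_block) */
    #define MAX_BLOCKS  3            /* MEI_MAX_FW_VER_BLOCKS */

    static size_t fw_ver_blocks(size_t bytes_recv)
    {
            size_t i;

            for (i = 0; i < MAX_BLOCKS; i++)
                    if (bytes_recv < HDR_SZ + BLOCK_SZ * (i + 1))
                            break;   /* trailing partial block: stop */
            return i;                /* whole blocks actually received */
    }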
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index b1133739fb4b..7bba62a72921 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -116,11 +116,12 @@ out:
* @buf: buffer to receive
* @length: buffer length
* @mode: io mode
+ * @timeout: recv timeout, 0 for infinite timeout
*
* Return: read size in bytes or < 0 on error
*/
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
- unsigned int mode)
+ unsigned int mode, unsigned long timeout)
{
struct mei_device *bus;
struct mei_cl_cb *cb;
@@ -158,13 +159,28 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
mutex_unlock(&bus->device_lock);
- if (wait_event_interruptible(cl->rx_wait,
- (!list_empty(&cl->rd_completed)) ||
- (!mei_cl_is_connected(cl)))) {
-
- if (signal_pending(current))
- return -EINTR;
- return -ERESTARTSYS;
+ if (timeout) {
+ rets = wait_event_interruptible_timeout
+ (cl->rx_wait,
+ (!list_empty(&cl->rd_completed)) ||
+ (!mei_cl_is_connected(cl)),
+ msecs_to_jiffies(timeout));
+ if (rets == 0)
+ return -ETIME;
+ if (rets < 0) {
+ if (signal_pending(current))
+ return -EINTR;
+ return -ERESTARTSYS;
+ }
+ } else {
+ if (wait_event_interruptible
+ (cl->rx_wait,
+ (!list_empty(&cl->rd_completed)) ||
+ (!mei_cl_is_connected(cl)))) {
+ if (signal_pending(current))
+ return -EINTR;
+ return -ERESTARTSYS;
+ }
}
mutex_lock(&bus->device_lock);
@@ -231,7 +247,7 @@ ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
{
struct mei_cl *cl = cldev->cl;
- return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK);
+ return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
@@ -248,7 +264,7 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
struct mei_cl *cl = cldev->cl;
- return __mei_cl_recv(cl, buf, length, 0);
+ return __mei_cl_recv(cl, buf, length, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
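The new timeout path in __mei_cl_recv() above leans on wait_event_interruptible_timeout(), which returns 0 on timeout, a negative value when interrupted, and the remaining jiffies otherwise. A minimal sketch of that mapping, assuming a hypothetical wait queue wq, condition done, and timeout_ms (not driver names):

    long ret;

    ret = wait_event_interruptible_timeout(wq, done,
                                           msecs_to_jiffies(timeout_ms));
    if (ret == 0)
            return -ETIME;          /* timed out */
    if (ret < 0)                    /* interrupted */
            return signal_pending(current) ? -EINTR : -ERESTARTSYS;
    /* ret > 0: condition met with `ret` jiffies to spare */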
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 8d6197a88b54..4ab6251d418e 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -863,10 +863,12 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
int slots;
int ret;
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_cl_send_disconnect(cl, cb);
@@ -1053,13 +1055,15 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
int slots;
int rets;
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
- slots = mei_hbuf_empty_slots(dev);
-
if (mei_cl_is_other_connecting(cl))
return 0;
- if (slots < msg_slots)
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
+ slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
rets = mei_cl_send_connect(cl, cb);
@@ -1294,10 +1298,12 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
int ret;
bool request;
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
request = mei_cl_notify_fop2req(cb->fop_type);
@@ -1533,6 +1539,23 @@ nortpm:
}
/**
+ * mei_msg_hdr_init - initialize mei message header
+ *
+ * @mei_hdr: mei message header
+ * @cb: message callback structure
+ */
+static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
+{
+ mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
+ mei_hdr->me_addr = mei_cl_me_id(cb->cl);
+ mei_hdr->length = 0;
+ mei_hdr->reserved = 0;
+ mei_hdr->msg_complete = 0;
+ mei_hdr->dma_ring = 0;
+ mei_hdr->internal = cb->internal;
+}
+
+/**
* mei_cl_irq_write - write a message to device
* from the interrupt thread context
*
@@ -1548,9 +1571,10 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
+ size_t hdr_len = sizeof(mei_hdr);
size_t len;
- u32 msg_slots;
- int slots;
+ size_t hbuf_len;
+ int hbuf_slots;
int rets;
bool first_chunk;
@@ -1572,40 +1596,41 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
- slots = mei_hbuf_empty_slots(dev);
len = buf->size - cb->buf_idx;
- msg_slots = mei_data2slots(len);
+ hbuf_slots = mei_hbuf_empty_slots(dev);
+ if (hbuf_slots < 0) {
+ rets = -EOVERFLOW;
+ goto err;
+ }
- mei_hdr.host_addr = mei_cl_host_addr(cl);
- mei_hdr.me_addr = mei_cl_me_id(cl);
- mei_hdr.reserved = 0;
- mei_hdr.internal = cb->internal;
+ hbuf_len = mei_slots2data(hbuf_slots);
- if (slots >= msg_slots) {
+ mei_msg_hdr_init(&mei_hdr, cb);
+
+ /**
+ * Split the message only if we can write the whole host buffer
+ * otherwise wait for next time the host buffer is empty.
+ */
+ if (len + hdr_len <= hbuf_len) {
mei_hdr.length = len;
mei_hdr.msg_complete = 1;
- /* Split the message only if we can write the whole host buffer */
- } else if (slots == dev->hbuf_depth) {
- msg_slots = slots;
- len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
- mei_hdr.length = len;
- mei_hdr.msg_complete = 0;
+ } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
+ mei_hdr.length = hbuf_len - hdr_len;
} else {
- /* wait for next time the host buffer is empty */
return 0;
}
cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
cb->buf.size, cb->buf_idx);
- rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
+ rets = mei_write_message(dev, &mei_hdr, hdr_len,
+ buf->data + cb->buf_idx, mei_hdr.length);
if (rets)
goto err;
cl->status = 0;
cl->writing_state = MEI_WRITING;
cb->buf_idx += mei_hdr.length;
- cb->completed = mei_hdr.msg_complete == 1;
if (first_chunk) {
if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
@@ -1634,13 +1659,16 @@ err:
*
* Return: number of bytes sent on success, <0 on failure.
*/
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
+ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
struct mei_msg_data *buf;
struct mei_msg_hdr mei_hdr;
- int size;
- int rets;
+ size_t hdr_len = sizeof(mei_hdr);
+ size_t len;
+ size_t hbuf_len;
+ int hbuf_slots;
+ ssize_t rets;
bool blocking;
if (WARN_ON(!cl || !cl->dev))
@@ -1652,52 +1680,57 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
dev = cl->dev;
buf = &cb->buf;
- size = buf->size;
+ len = buf->size;
blocking = cb->blocking;
- cl_dbg(dev, cl, "size=%d\n", size);
+ cl_dbg(dev, cl, "len=%zd\n", len);
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
- cl_err(dev, cl, "rpm: get failed %d\n", rets);
+ cl_err(dev, cl, "rpm: get failed %zd\n", rets);
goto free;
}
cb->buf_idx = 0;
cl->writing_state = MEI_IDLE;
- mei_hdr.host_addr = mei_cl_host_addr(cl);
- mei_hdr.me_addr = mei_cl_me_id(cl);
- mei_hdr.reserved = 0;
- mei_hdr.msg_complete = 0;
- mei_hdr.internal = cb->internal;
rets = mei_cl_tx_flow_ctrl_creds(cl);
if (rets < 0)
goto err;
+ mei_msg_hdr_init(&mei_hdr, cb);
+
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
- rets = size;
+ rets = len;
goto out;
}
+
if (!mei_hbuf_acquire(dev)) {
cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
- rets = size;
+ rets = len;
goto out;
}
- /* Check for a maximum length */
- if (size > mei_hbuf_max_len(dev)) {
- mei_hdr.length = mei_hbuf_max_len(dev);
- mei_hdr.msg_complete = 0;
- } else {
- mei_hdr.length = size;
+ hbuf_slots = mei_hbuf_empty_slots(dev);
+ if (hbuf_slots < 0) {
+ rets = -EOVERFLOW;
+ goto out;
+ }
+
+ hbuf_len = mei_slots2data(hbuf_slots);
+
+ if (len + hdr_len <= hbuf_len) {
+ mei_hdr.length = len;
mei_hdr.msg_complete = 1;
+ } else {
+ mei_hdr.length = hbuf_len - hdr_len;
}
- rets = mei_write_message(dev, &mei_hdr, buf->data);
+ rets = mei_write_message(dev, &mei_hdr, hdr_len,
+ buf->data, mei_hdr.length);
if (rets)
goto err;
@@ -1707,7 +1740,6 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
cl->writing_state = MEI_WRITING;
cb->buf_idx = mei_hdr.length;
- cb->completed = mei_hdr.msg_complete == 1;
out:
if (mei_hdr.msg_complete)
@@ -1735,7 +1767,7 @@ out:
}
}
- rets = size;
+ rets = buf->size;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
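Both write paths above now make the same three-way chunking decision against the live empty-slot count instead of a precomputed max length. A standalone demonstration with assumed numbers (4-byte slots, 4-byte header, 32-slot buffer):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            int      hbuf_slots = 32;     /* assumed mei_hbuf_empty_slots() */
            unsigned hbuf_depth = 32;     /* assumed mei_hbuf_depth() */
            size_t   hdr_len    = 4;
            size_t   hbuf_len   = (size_t)hbuf_slots * 4; /* mei_slots2data() */
            size_t   len        = 200;    /* payload left to send */

            if (len + hdr_len <= hbuf_len)
                    puts("whole message fits: msg_complete = 1");
            else if ((unsigned)hbuf_slots == hbuf_depth)
                    printf("buffer empty but too small: partial chunk of %zu bytes\n",
                           hbuf_len - hdr_len);   /* 124 here */
            else
                    puts("buffer partially full: wait for it to drain");
            return 0;
    }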
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 5371df4d8af3..64e318f589b4 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -202,7 +202,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
+ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index c815da91089c..7b5df8fd6c5a 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -183,6 +183,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
dev->hbm_f_fa_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
dev->hbm_f_os_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tDR: %01d\n",
+ dev->hbm_f_dr_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index fe6595fe94f1..09e233d4c0de 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -96,6 +96,20 @@ static int mei_cl_conn_status_to_errno(enum mei_cl_connect_status status)
}
/**
+ * mei_hbm_write_message - wrapper for sending hbm messages.
+ *
+ * @dev: mei device
+ * @hdr: mei header
+ * @data: payload
+ */
+static inline int mei_hbm_write_message(struct mei_device *dev,
+ struct mei_msg_hdr *hdr,
+ const void *data)
+{
+ return mei_write_message(dev, hdr, sizeof(*hdr), data, hdr->length);
+}
+
+/**
* mei_hbm_idle - set hbm to idle state
*
* @dev: the device structure
@@ -131,6 +145,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
hdr->me_addr = 0;
hdr->length = length;
hdr->msg_complete = 1;
+ hdr->dma_ring = 0;
hdr->reserved = 0;
hdr->internal = 0;
}
@@ -174,7 +189,7 @@ static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
mei_hbm_hdr(&mei_hdr, len);
mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
- return mei_write_message(dev, &mei_hdr, buf);
+ return mei_hbm_write_message(dev, &mei_hdr, buf);
}
/**
@@ -267,7 +282,7 @@ int mei_hbm_start_req(struct mei_device *dev)
start_req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- ret = mei_write_message(dev, &mei_hdr, &start_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &start_req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@@ -304,7 +319,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
enum_req.flags |= dev->hbm_f_ie_supported ?
MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
- ret = mei_write_message(dev, &mei_hdr, &enum_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &enum_req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@@ -373,7 +388,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
resp.me_addr = addr;
resp.status = status;
- ret = mei_write_message(dev, &mei_hdr, &resp);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &resp);
if (ret)
dev_err(dev->dev, "add client response write failed: ret = %d\n",
ret);
@@ -430,7 +445,7 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
req.start = start;
- ret = mei_write_message(dev, &mei_hdr, &req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
@@ -555,7 +570,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
prop_req.me_addr = addr;
- ret = mei_write_message(dev, &mei_hdr, &prop_req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &prop_req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@@ -592,7 +607,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
memset(&req, 0, len);
req.hbm_cmd = pg_cmd;
- ret = mei_write_message(dev, &mei_hdr, &req);
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "power gate command write failed.\n");
return ret;
@@ -618,7 +633,7 @@ static int mei_hbm_stop_req(struct mei_device *dev)
req.hbm_cmd = HOST_STOP_REQ_CMD;
req.reason = DRIVER_STOP_REQUEST;
- return mei_write_message(dev, &mei_hdr, &req);
+ return mei_hbm_write_message(dev, &mei_hdr, &req);
}
/**
@@ -992,6 +1007,12 @@ static void mei_hbm_config_features(struct mei_device *dev)
/* OS ver message Support */
if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
dev->hbm_f_os_supported = 1;
+
+ /* DMA Ring Support */
+ if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
+ (dev->version.major_version == HBM_MAJOR_VERSION_DR &&
+ dev->version.minor_version >= HBM_MINOR_VERSION_DR))
+ dev->hbm_f_dr_supported = 1;
}
/**
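With the wrapper above, every fixed-size HBM request reduces to "build a header for sizeof(payload), then send". A hedged fragment modeled on mei_hbm_stop_req() as shown further down:

    struct hbm_host_stop_request req = { 0 };
    struct mei_msg_hdr hdr;

    mei_hbm_hdr(&hdr, sizeof(req));    /* fills addresses, length, flags */
    req.hbm_cmd = HOST_STOP_REQ_CMD;
    req.reason  = DRIVER_STOP_REQUEST;

    ret = mei_hbm_write_message(dev, &hdr, &req);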
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 334ab02e1de2..0759c3a668de 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -19,6 +19,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -228,7 +229,7 @@ static void mei_me_hw_config(struct mei_device *dev)
/* Doesn't change in runtime */
hcsr = mei_hcsr_read(dev);
- dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+ hw->hbuf_depth = (hcsr & H_CBD) >> 24;
reg = 0;
pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
@@ -490,70 +491,82 @@ static bool mei_me_hbuf_is_empty(struct mei_device *dev)
*/
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
+ struct mei_me_hw *hw = to_me_hw(dev);
unsigned char filled_slots, empty_slots;
filled_slots = mei_hbuf_filled_slots(dev);
- empty_slots = dev->hbuf_depth - filled_slots;
+ empty_slots = hw->hbuf_depth - filled_slots;
/* check for overflow */
- if (filled_slots > dev->hbuf_depth)
+ if (filled_slots > hw->hbuf_depth)
return -EOVERFLOW;
return empty_slots;
}
/**
- * mei_me_hbuf_max_len - returns size of hw buffer.
+ * mei_me_hbuf_depth - returns depth of the hw buffer.
*
* @dev: the device structure
*
- * Return: size of hw buffer in bytes
+ * Return: size of hw buffer in slots
*/
-static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+static u32 mei_me_hbuf_depth(const struct mei_device *dev)
{
- return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
-}
+ struct mei_me_hw *hw = to_me_hw(dev);
+ return hw->hbuf_depth;
+}
/**
* mei_me_hbuf_write - writes a message to host hw buffer.
*
* @dev: the device structure
- * @header: mei HECI header of message
- * @buf: message payload will be written
+ * @hdr: header of message
+ * @hdr_len: header length in bytes: must be a multiple of slot size (4 bytes)
+ * @data: payload
+ * @data_len: payload length in bytes
*
- * Return: -EIO if write has failed
+ * Return: 0 if success, < 0 - otherwise.
*/
static int mei_me_hbuf_write(struct mei_device *dev,
- struct mei_msg_hdr *header,
- const unsigned char *buf)
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
{
unsigned long rem;
- unsigned long length = header->length;
- u32 *reg_buf = (u32 *)buf;
+ unsigned long i;
+ const u32 *reg_buf;
u32 dw_cnt;
- int i;
int empty_slots;
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+ if (WARN_ON(!hdr || !data || hdr_len & 0x3))
+ return -EINVAL;
+
+ dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
empty_slots = mei_hbuf_empty_slots(dev);
dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
- dw_cnt = mei_data2slots(length);
- if (empty_slots < 0 || dw_cnt > empty_slots)
+ if (empty_slots < 0)
+ return -EOVERFLOW;
+
+ dw_cnt = mei_data2slots(hdr_len + data_len);
+ if (dw_cnt > (u32)empty_slots)
return -EMSGSIZE;
- mei_me_hcbww_write(dev, *((u32 *) header));
+ reg_buf = hdr;
+ for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
+ mei_me_hcbww_write(dev, reg_buf[i]);
- for (i = 0; i < length / 4; i++)
+ reg_buf = data;
+ for (i = 0; i < data_len / MEI_SLOT_SIZE; i++)
mei_me_hcbww_write(dev, reg_buf[i]);
- rem = length & 0x3;
+ rem = data_len & 0x3;
if (rem > 0) {
u32 reg = 0;
- memcpy(&reg, &buf[length - rem], rem);
+ memcpy(&reg, (const u8 *)data + data_len - rem, rem);
mei_me_hcbww_write(dev, reg);
}
@@ -601,11 +614,11 @@ static int mei_me_count_full_read_slots(struct mei_device *dev)
* Return: always 0
*/
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
- unsigned long buffer_length)
+ unsigned long buffer_length)
{
u32 *reg_buf = (u32 *)buffer;
- for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+ for (; buffer_length >= MEI_SLOT_SIZE; buffer_length -= MEI_SLOT_SIZE)
*reg_buf++ = mei_me_mecbrw_read(dev);
if (buffer_length > 0) {
@@ -1314,7 +1327,7 @@ static const struct mei_hw_ops mei_me_hw_ops = {
.hbuf_free_slots = mei_me_hbuf_empty_slots,
.hbuf_is_ready = mei_me_hbuf_is_empty,
- .hbuf_max_len = mei_me_hbuf_max_len,
+ .hbuf_depth = mei_me_hbuf_depth,
.write = mei_me_hbuf_write,
@@ -1377,6 +1390,11 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.fw_status.status[4] = PCI_CFG_HFS_5, \
.fw_status.status[5] = PCI_CFG_HFS_6
+#define MEI_CFG_DMA_128 \
+ .dma_size[DMA_DSCR_HOST] = SZ_128K, \
+ .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
+ .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
+
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
MEI_CFG_ICH_HFS,
@@ -1409,6 +1427,12 @@ static const struct mei_cfg mei_me_pch8_sps_cfg = {
MEI_CFG_FW_SPS,
};
+/* Cannon Lake and newer devices */
+static const struct mei_cfg mei_me_pch12_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_DMA_128,
+};
+
/*
* mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1421,6 +1445,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+ [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 67892533576e..bbcc5fc106cd 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -31,10 +31,12 @@
*
* @fw_status: FW status
* @quirk_probe: device exclusion quirk
+ * @dma_size: device DMA buffers size
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
+ size_t dma_size[DMA_DSCR_NUM];
};
@@ -52,12 +54,14 @@ struct mei_cfg {
* @mem_addr: io memory address
* @pg_state: power gating state
* @d0i3_supported: di03 support
+ * @hbuf_depth: depth of hardware host/write buffer in slots
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
enum mei_pg_state pg_state;
bool d0i3_supported;
+ u8 hbuf_depth;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
@@ -78,6 +82,7 @@ struct mei_me_hw {
* @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
+ * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -88,6 +93,7 @@ enum mei_cfg_idx {
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
+ MEI_ME_PCH12_CFG,
MEI_ME_NUM_CFG,
};
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index c2c8993e2a51..8449fe0367ff 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -31,6 +31,7 @@
#include "mei-trace.h"
+#define TXE_HBUF_DEPTH (PAYLOAD_SIZE / MEI_SLOT_SIZE)
/**
* mei_txe_reg_read - Reads 32bit data from the txe device
@@ -681,9 +682,6 @@ static void mei_txe_hw_config(struct mei_device *dev)
struct mei_txe_hw *hw = to_txe_hw(dev);
- /* Doesn't change in runtime */
- dev->hbuf_depth = PAYLOAD_SIZE / 4;
-
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
@@ -691,37 +689,34 @@ static void mei_txe_hw_config(struct mei_device *dev)
hw->aliveness, hw->readiness);
}
-
/**
* mei_txe_write - writes a message to device.
*
* @dev: the device structure
- * @header: header of message
- * @buf: message buffer will be written
+ * @hdr: header of message
+ * @hdr_len: header length in bytes - must be a multiple of slot size (4 bytes)
+ * @data: payload
+ * @data_len: payload length in bytes
*
- * Return: 0 if success, <0 - otherwise.
+ * Return: 0 if success, < 0 - otherwise.
*/
-
static int mei_txe_write(struct mei_device *dev,
- struct mei_msg_hdr *header,
- const unsigned char *buf)
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
unsigned long rem;
- unsigned long length;
- int slots = dev->hbuf_depth;
- u32 *reg_buf = (u32 *)buf;
+ const u32 *reg_buf;
+ u32 slots = TXE_HBUF_DEPTH;
u32 dw_cnt;
- int i;
+ unsigned long i, j;
- if (WARN_ON(!header || !buf))
+ if (WARN_ON(!hdr || !data || hdr_len & 0x3))
return -EINVAL;
- length = header->length;
-
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+ dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
- dw_cnt = mei_data2slots(length);
+ dw_cnt = mei_data2slots(hdr_len + data_len);
if (dw_cnt > slots)
return -EMSGSIZE;
@@ -739,17 +734,20 @@ static int mei_txe_write(struct mei_device *dev,
return -EAGAIN;
}
- mei_txe_input_payload_write(dev, 0, *((u32 *)header));
+ reg_buf = hdr;
+ for (i = 0; i < hdr_len / MEI_SLOT_SIZE; i++)
+ mei_txe_input_payload_write(dev, i, reg_buf[i]);
- for (i = 0; i < length / 4; i++)
- mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);
+ reg_buf = data;
+ for (j = 0; j < data_len / MEI_SLOT_SIZE; j++)
+ mei_txe_input_payload_write(dev, i + j, reg_buf[j]);
- rem = length & 0x3;
+ rem = data_len & 0x3;
if (rem > 0) {
u32 reg = 0;
- memcpy(&reg, &buf[length - rem], rem);
- mei_txe_input_payload_write(dev, i + 1, reg);
+ memcpy(&reg, (const u8 *)data + data_len - rem, rem);
+ mei_txe_input_payload_write(dev, i + j, reg);
}
/* after each write the whole buffer is consumed */
@@ -762,15 +760,15 @@ static int mei_txe_write(struct mei_device *dev,
}
/**
- * mei_txe_hbuf_max_len - mimics the me hbuf circular buffer
+ * mei_txe_hbuf_depth - mimics the me hbuf circular buffer
*
* @dev: the device structure
*
- * Return: the PAYLOAD_SIZE - 4
+ * Return: the TXE_HBUF_DEPTH
*/
-static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
+static u32 mei_txe_hbuf_depth(const struct mei_device *dev)
{
- return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
+ return TXE_HBUF_DEPTH;
}
/**
@@ -778,7 +776,7 @@ static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
*
* @dev: the device structure
*
- * Return: always hbuf_depth
+ * Return: always TXE_HBUF_DEPTH
*/
static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
{
@@ -797,7 +795,7 @@ static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
static int mei_txe_count_full_read_slots(struct mei_device *dev)
{
/* read buffers has static size */
- return PAYLOAD_SIZE / 4;
+ return TXE_HBUF_DEPTH;
}
/**
@@ -839,7 +837,7 @@ static int mei_txe_read(struct mei_device *dev,
dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
len, mei_txe_out_data_read(dev, 0));
- for (i = 0; i < len / 4; i++) {
+ for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
/* skip header: index starts from 1 */
reg = mei_txe_out_data_read(dev, i + 1);
dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
@@ -1140,7 +1138,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
/* Input Ready: Detection if host can write to SeC */
if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
dev->hbuf_is_ready = true;
- hw->slots = dev->hbuf_depth;
+ hw->slots = TXE_HBUF_DEPTH;
}
if (hw->aliveness && dev->hbuf_is_ready) {
@@ -1186,7 +1184,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
.hbuf_free_slots = mei_txe_hbuf_empty_slots,
.hbuf_is_ready = mei_txe_is_input_ready,
- .hbuf_max_len = mei_txe_hbuf_max_len,
+ .hbuf_depth = mei_txe_hbuf_depth,
.write = mei_txe_write,
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 5c8286b40b62..65655925791a 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -28,8 +28,6 @@
#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
-#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
-
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
@@ -82,6 +80,12 @@
#define HBM_MINOR_VERSION_OS 0
#define HBM_MAJOR_VERSION_OS 2
+/*
+ * MEI version with dma ring support
+ */
+#define HBM_MINOR_VERSION_DR 1
+#define HBM_MAJOR_VERSION_DR 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -124,6 +128,9 @@
#define MEI_HBM_NOTIFY_RES_CMD 0x90
#define MEI_HBM_NOTIFICATION_CMD 0x11
+#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12
+#define MEI_HBM_DMA_SETUP_RES_CMD 0x92
+
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -189,19 +196,27 @@ enum mei_cl_disconnect_status {
MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS
};
-/*
- * MEI BUS Interface Section
+/**
+ * struct mei_msg_hdr - MEI BUS Interface Section
+ *
+ * @me_addr: device address
+ * @host_addr: host address
+ * @length: message length
+ * @reserved: reserved
+ * @dma_ring: message is on dma ring
+ * @internal: message is internal
+ * @msg_complete: last packet of the message
*/
struct mei_msg_hdr {
u32 me_addr:8;
u32 host_addr:8;
u32 length:9;
- u32 reserved:5;
+ u32 reserved:4;
+ u32 dma_ring:1;
u32 internal:1;
u32 msg_complete:1;
} __packed;
-
struct mei_bus_message {
u8 hbm_cmd;
u8 data[0];
@@ -451,4 +466,50 @@ struct hbm_notification {
u8 reserved[1];
} __packed;
+/**
+ * struct hbm_dma_mem_dscr - dma ring
+ *
+ * @addr_hi: the high 32bits of 64 bit address
+ * @addr_lo: the low 32bits of 64 bit address
+ * @size: size in bytes (must be a power of 2)
+ */
+struct hbm_dma_mem_dscr {
+ u32 addr_hi;
+ u32 addr_lo;
+ u32 size;
+} __packed;
+
+enum {
+ DMA_DSCR_HOST = 0,
+ DMA_DSCR_DEVICE = 1,
+ DMA_DSCR_CTRL = 2,
+ DMA_DSCR_NUM,
+};
+
+/**
+ * struct hbm_dma_setup_request - dma setup request
+ *
+ * @hbm_cmd: bus message command header
+ * @reserved: reserved for alignment
+ * @dma_dscr: dma descriptor for HOST, DEVICE, and CTRL
+ */
+struct hbm_dma_setup_request {
+ u8 hbm_cmd;
+ u8 reserved[3];
+ struct hbm_dma_mem_dscr dma_dscr[DMA_DSCR_NUM];
+} __packed;
+
+/**
+ * struct hbm_dma_setup_response - dma setup response
+ *
+ * @hbm_cmd: bus message command header
+ * @status: 0 on success; otherwise DMA setup failed.
+ * @reserved: reserved for alignment
+ */
+struct hbm_dma_setup_response {
+ u8 hbm_cmd;
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
#endif
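The widened header above still occupies exactly one 4-byte slot: 8 + 8 + 9 + 4 + 1 + 1 + 1 = 32 bits, with dma_ring carved out of the old 5-bit reserved field. A standalone C11 sketch (mirroring, not replacing, the driver struct) that checks this at compile time:

    #include <assert.h>
    #include <stdint.h>

    struct msg_hdr {                   /* mirrors struct mei_msg_hdr */
            uint32_t me_addr:8;
            uint32_t host_addr:8;
            uint32_t length:9;
            uint32_t reserved:4;
            uint32_t dma_ring:1;
            uint32_t internal:1;
            uint32_t msg_complete:1;
    } __attribute__((packed));

    static_assert(sizeof(struct msg_hdr) == 4, "header must fit one slot");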
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 6649f0d56d2f..5a661cbdf2ae 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -173,10 +173,12 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
int slots;
int ret;
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response));
slots = mei_hbuf_empty_slots(dev);
- msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_disconnect_rsp(dev, cl);
@@ -206,10 +208,12 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
if (!list_empty(&cl->rd_pending))
return 0;
- msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
+ msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control));
slots = mei_hbuf_empty_slots(dev);
+ if (slots < 0)
+ return -EOVERFLOW;
- if (slots < msg_slots)
+ if ((u32)slots < msg_slots)
return -EMSGSIZE;
ret = mei_hbm_cl_flow_control_req(dev, cl);
@@ -368,7 +372,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
return 0;
slots = mei_hbuf_empty_slots(dev);
- if (slots <= 0)
+ if (slots < 0)
+ return -EOVERFLOW;
+
+ if (slots == 0)
return -EMSGSIZE;
/* complete all waiting for write CB */
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 7465f17e1559..4d77a6ae183a 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
+ * Copyright (c) 2003-2018, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -137,7 +137,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
struct mei_device *dev;
struct mei_cl_cb *cb = NULL;
bool nonblock = !!(file->f_flags & O_NONBLOCK);
- int rets;
+ ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -170,7 +170,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
rets = mei_cl_read_start(cl, length, file);
if (rets && rets != -EBUSY) {
- cl_dbg(dev, cl, "mei start read failure status = %d\n", rets);
+ cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
goto out;
}
@@ -204,7 +204,7 @@ copy_buffer:
/* now copy the data to user space */
if (cb->status) {
rets = cb->status;
- cl_dbg(dev, cl, "read operation failed %d\n", rets);
+ cl_dbg(dev, cl, "read operation failed %zd\n", rets);
goto free;
}
@@ -236,7 +236,7 @@ free:
*offset = 0;
out:
- cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
+ cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -256,7 +256,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb;
struct mei_device *dev;
- int rets;
+ ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
return -ENODEV;
@@ -312,7 +312,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
}
}
- *offset = 0;
cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
if (!cb) {
rets = -ENOMEM;
@@ -812,11 +811,39 @@ static ssize_t tx_queue_limit_store(struct device *device,
}
static DEVICE_ATTR_RW(tx_queue_limit);
+/**
+ * fw_ver_show - display ME FW version
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t fw_ver_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ struct mei_fw_version *ver;
+ ssize_t cnt = 0;
+ int i;
+
+ ver = dev->fw_ver;
+
+ for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
+ cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
+ ver[i].platform, ver[i].major, ver[i].minor,
+ ver[i].hotfix, ver[i].buildno);
+ return cnt;
+}
+static DEVICE_ATTR_RO(fw_ver);
+
static struct attribute *mei_attrs[] = {
&dev_attr_fw_status.attr,
&dev_attr_hbm_ver.attr,
&dev_attr_hbm_ver_drv.attr,
&dev_attr_tx_queue_limit.attr,
+ &dev_attr_fw_ver.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index be9c48415da9..377397e1b5a5 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,7 +1,7 @@
/*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
+ * Copyright (c) 2003-2018, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,7 +26,8 @@
#include "hw.h"
#include "hbm.h"
-#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
+#define MEI_SLOT_SIZE sizeof(u32)
+#define MEI_RD_MSG_BUF_SIZE (128 * MEI_SLOT_SIZE)
/*
* Number of Maximum MEI Clients
@@ -174,7 +175,6 @@ struct mei_cl;
* @status: io status of the cb
* @internal: communication between driver and FW flag
* @blocking: transmission blocking mode
- * @completed: the transfer or reception has completed
*/
struct mei_cl_cb {
struct list_head list;
@@ -186,7 +186,6 @@ struct mei_cl_cb {
int status;
u32 internal:1;
u32 blocking:1;
- u32 completed:1;
};
/**
@@ -269,7 +268,7 @@ struct mei_cl {
*
* @hbuf_free_slots : query for write buffer empty slots
* @hbuf_is_ready : query if write buffer is empty
- * @hbuf_max_len : query for write buffer max len
+ * @hbuf_depth : query for write buffer depth
*
* @write : write a message to FW
*
@@ -299,10 +298,10 @@ struct mei_hw_ops {
int (*hbuf_free_slots)(struct mei_device *dev);
bool (*hbuf_is_ready)(struct mei_device *dev);
- size_t (*hbuf_max_len)(const struct mei_device *dev);
+ u32 (*hbuf_depth)(const struct mei_device *dev);
int (*write)(struct mei_device *dev,
- struct mei_msg_hdr *hdr,
- const unsigned char *buf);
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len);
int (*rdbuf_full_slots)(struct mei_device *dev);
@@ -317,7 +316,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
unsigned int mode);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
- unsigned int mode);
+ unsigned int mode, unsigned long timeout);
bool mei_cl_bus_rx_event(struct mei_cl *cl);
bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
@@ -355,6 +354,25 @@ enum mei_pg_state {
const char *mei_pg_state_str(enum mei_pg_state state);
/**
+ * struct mei_fw_version - MEI FW version struct
+ *
+ * @platform: platform identifier
+ * @major: major version field
+ * @minor: minor version field
+ * @buildno: build number version field
+ * @hotfix: hotfix number version field
+ */
+struct mei_fw_version {
+ u8 platform;
+ u8 major;
+ u16 minor;
+ u16 buildno;
+ u16 hotfix;
+};
+
+#define MEI_MAX_FW_VER_BLOCKS 3
+
+/**
* struct mei_device - MEI private device struct
*
* @dev : device on a bus
@@ -390,7 +408,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @rd_msg_buf : control messages buffer
* @rd_msg_hdr : read message header storage
*
- * @hbuf_depth : depth of hardware host/write buffer is slots
* @hbuf_is_ready : query if the host host/write buffer is ready
*
* @version : HBM protocol version in use
@@ -401,6 +418,9 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @hbm_f_fa_supported : hbm feature fixed address client
* @hbm_f_ie_supported : hbm feature immediate reply to enum request
* @hbm_f_os_supported : hbm feature support OS ver message
+ * @hbm_f_dr_supported : hbm feature dma ring supported
+ *
+ * @fw_ver : FW versions
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -466,7 +486,6 @@ struct mei_device {
u32 rd_msg_hdr;
/* write buffer */
- u8 hbuf_depth;
bool hbuf_is_ready;
struct hbm_version version;
@@ -477,6 +496,9 @@ struct mei_device {
unsigned int hbm_f_fa_supported:1;
unsigned int hbm_f_ie_supported:1;
unsigned int hbm_f_os_supported:1;
+ unsigned int hbm_f_dr_supported:1;
+
+ struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
@@ -508,8 +530,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
}
/**
- * mei_data2slots - get slots - number of (dwords) from a message length
- * + size of the mei header
+ * mei_data2slots - get number of slots from a message length
*
* @length: size of the messages in bytes
*
@@ -517,7 +538,20 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
*/
static inline u32 mei_data2slots(size_t length)
{
- return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
+ return DIV_ROUND_UP(length, MEI_SLOT_SIZE);
+}
+
+/**
+ * mei_hbm2slots - get number of slots from an hbm message:
+ * message length + size of the mei message header
+ *
+ * @length: size of the messages in bytes
+ *
+ * Return: number of slots
+ */
+static inline u32 mei_hbm2slots(size_t length)
+{
+ return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, MEI_SLOT_SIZE);
}
/**
@@ -529,7 +563,7 @@ static inline u32 mei_data2slots(size_t length)
*/
static inline u32 mei_slots2data(int slots)
{
- return slots * 4;
+ return slots * MEI_SLOT_SIZE;
}
/*
@@ -630,15 +664,16 @@ static inline int mei_hbuf_empty_slots(struct mei_device *dev)
return dev->ops->hbuf_free_slots(dev);
}
-static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
+static inline u32 mei_hbuf_depth(const struct mei_device *dev)
{
- return dev->ops->hbuf_max_len(dev);
+ return dev->ops->hbuf_depth(dev);
}
static inline int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *hdr, const void *buf)
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
{
- return dev->ops->write(dev, hdr, buf);
+ return dev->ops->write(dev, hdr, hdr_len, data, data_len);
}
static inline u32 mei_read_hdr(const struct mei_device *dev)
@@ -681,10 +716,10 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
int mei_register(struct mei_device *dev, struct device *parent);
void mei_deregister(struct mei_device *dev);
-#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d"
#define MEI_HDR_PRM(hdr) \
(hdr)->host_addr, (hdr)->me_addr, \
- (hdr)->length, (hdr)->internal, (hdr)->msg_complete
+ (hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete
ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len);
/**
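A worked example of the split helpers above, for a 9-byte payload with the 4-byte MEI_SLOT_SIZE:

    #define MEI_SLOT_SIZE 4
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* mei_data2slots(9) -> DIV_ROUND_UP(9, 4)     = 3 slots (payload only)  */
    /* mei_hbm2slots(9)  -> DIV_ROUND_UP(4 + 9, 4) = 4 slots (header + data) */
    /* mei_slots2data(4) -> 4 * 4                  = 16 bytes                */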
diff --git a/drivers/misc/mic/cosm/cosm_main.h b/drivers/misc/mic/cosm/cosm_main.h
index f01156fca881..aa78cdf25e40 100644
--- a/drivers/misc/mic/cosm/cosm_main.h
+++ b/drivers/misc/mic/cosm/cosm_main.h
@@ -45,7 +45,10 @@ struct cosm_msg {
u64 id;
union {
u64 shutdown_status;
- struct timespec64 timespec;
+ struct {
+ u64 tv_sec;
+ u64 tv_nsec;
+ } timespec;
};
};
diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c
index 05a63286741c..e94b7eac4a06 100644
--- a/drivers/misc/mic/cosm/cosm_scif_server.c
+++ b/drivers/misc/mic/cosm/cosm_scif_server.c
@@ -179,9 +179,13 @@ static void cosm_set_crashed(struct cosm_device *cdev)
static void cosm_send_time(struct cosm_device *cdev)
{
struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME };
+ struct timespec64 ts;
int rc;
- getnstimeofday64(&msg.timespec);
+ ktime_get_real_ts64(&ts);
+ msg.timespec.tv_sec = ts.tv_sec;
+ msg.timespec.tv_nsec = ts.tv_nsec;
+
rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
if (rc < 0)
dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c
index beafc0da4027..225078cb51fd 100644
--- a/drivers/misc/mic/cosm_client/cosm_scif_client.c
+++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c
@@ -63,7 +63,11 @@ static struct notifier_block cosm_reboot = {
/* Set system time from timespec value received from the host */
static void cosm_set_time(struct cosm_msg *msg)
{
- int rc = do_settimeofday64(&msg->timespec);
+ struct timespec64 ts = {
+ .tv_sec = msg->timespec.tv_sec,
+ .tv_nsec = msg->timespec.tv_nsec,
+ };
+ int rc = do_settimeofday64(&ts);
if (rc)
dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n",
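The two hunks above replace struct timespec64 in the SCIF message with a pair of u64 fields, so host and card agree on the wire layout even if their kernels define timespec64 differently. A sketch of the send-side packing, with a hypothetical struct and helper name:

    #include <linux/timekeeping.h>
    #include <linux/types.h>

    struct cosm_wire_time {            /* fixed layout on both ends */
            __u64 tv_sec;
            __u64 tv_nsec;
    };

    static void cosm_pack_time(struct cosm_wire_time *w)
    {
            struct timespec64 ts;

            ktime_get_real_ts64(&ts);
            w->tv_sec  = ts.tv_sec;
            w->tv_nsec = ts.tv_nsec;
    }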
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index 7b2dddcdd46d..8dd0ccedeb94 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -187,6 +187,7 @@ int scif_close(scif_epd_t epd)
case SCIFEP_ZOMBIE:
dev_err(scif_info.mdev.this_device,
"SCIFAPI close: zombie state unexpected\n");
+ /* fall through */
case SCIFEP_DISCONNECTED:
spin_unlock(&ep->lock);
scif_unregister_all_windows(epd);
@@ -370,11 +371,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
goto scif_bind_exit;
}
} else {
- pn = scif_get_new_port();
- if (!pn) {
- ret = -ENOSPC;
+ ret = scif_get_new_port();
+ if (ret < 0)
goto scif_bind_exit;
- }
+ pn = ret;
}
ep->state = SCIFEP_BOUND;
@@ -648,13 +648,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
err = -EISCONN;
break;
case SCIFEP_UNBOUND:
- ep->port.port = scif_get_new_port();
- if (!ep->port.port) {
- err = -ENOSPC;
- } else {
- ep->port.node = scif_info.nodeid;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- }
+ err = scif_get_new_port();
+ if (err < 0)
+ break;
+ ep->port.port = err;
+ ep->port.node = scif_info.nodeid;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
/* Fall through */
case SCIFEP_BOUND:
/*
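Both call sites above adopt the "value or -errno in one int" convention: scif_get_new_port() now returns the allocated port number (>= 0) or a negative error, so callers propagate the real cause instead of substituting -ENOSPC. A hedged caller fragment:

    int ret;

    ret = scif_get_new_port();
    if (ret < 0)
            return ret;            /* propagate e.g. -ENOSPC as-is */
    ep->port.port = ret;           /* success: ret is the port number */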
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 128d5615c804..05a890ce2ab8 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -656,7 +656,6 @@ xpc_initiate_connect(int ch_number)
{
short partid;
struct xpc_partition *part;
- struct xpc_channel *ch;
DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
@@ -664,8 +663,6 @@ xpc_initiate_connect(int ch_number)
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
- ch = &part->channels[ch_number];
-
/*
* Initiate the establishment of a connection on the
* newly registered channel to the remote partition.
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 7284413dabfd..0c3ef6f1df54 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -415,7 +415,6 @@ xpc_discovery(void)
int region_size;
int max_regions;
int nasid;
- struct xpc_rsvd_page *rp;
unsigned long *discovered_nasids;
enum xp_retval ret;
@@ -432,8 +431,6 @@ xpc_discovery(void)
return;
}
- rp = (struct xpc_rsvd_page *)xpc_rsvd_page;
-
/*
* The term 'region' in this context refers to the minimum number of
* nodes that can comprise an access protection grouping. The access
@@ -449,8 +446,10 @@ xpc_discovery(void)
switch (region_size) {
case 128:
max_regions *= 2;
+ /* fall through */
case 64:
max_regions *= 2;
+ /* fall through */
case 32:
max_regions *= 2;
region_size = 16;
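The annotated fall-throughs above compound, and all three cases end with region_size set to 16. A hypothetical helper with the same net effect on max_regions:

    static int region_factor(int region_size)
    {
            switch (region_size) {
            case 128: return 8;    /* 2 * 2 * 2: falls through both cases */
            case 64:  return 4;    /* 2 * 2 */
            case 32:  return 2;
            default:  return 1;    /* region_size 16: unchanged */
            }
    }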
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index c5dc6095686a..74b183baf044 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -391,29 +391,37 @@ static int sram_probe(struct platform_device *pdev)
if (IS_ERR(sram->pool))
return PTR_ERR(sram->pool);
- ret = sram_reserve_regions(sram, res);
- if (ret)
- return ret;
-
sram->clk = devm_clk_get(sram->dev, NULL);
if (IS_ERR(sram->clk))
sram->clk = NULL;
else
clk_prepare_enable(sram->clk);
+ ret = sram_reserve_regions(sram, res);
+ if (ret)
+ goto err_disable_clk;
+
platform_set_drvdata(pdev, sram);
init_func = of_device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func();
if (ret)
- return ret;
+ goto err_free_partitions;
}
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
gen_pool_size(sram->pool) / 1024, sram->virt_base);
return 0;
+
+err_free_partitions:
+ sram_free_partitions(sram);
+err_disable_clk:
+ if (sram->clk)
+ clk_disable_unprepare(sram->clk);
+
+ return ret;
}
static int sram_remove(struct platform_device *pdev)
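The reordering above lets the probe error path unwind in reverse order of acquisition (free partitions first, then disable the clock). The general skeleton, with hypothetical resources a and b:

    ret = acquire_a();
    if (ret)
            return ret;

    ret = acquire_b();
    if (ret)
            goto err_release_a;    /* only a exists at this point */

    return 0;

    err_release_a:
            release_a();
            return ret;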
diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig
index f34dcc514730..5bb92698bc80 100644
--- a/drivers/misc/ti-st/Kconfig
+++ b/drivers/misc/ti-st/Kconfig
@@ -5,7 +5,8 @@
menu "Texas Instruments shared transport line discipline"
config TI_ST
tristate "Shared transport core driver"
- depends on NET && GPIOLIB && TTY
+ depends on NET && TTY
+ depends on GPIOLIB || COMPILE_TEST
select FW_LOADER
help
This enables the shared transport core driver for TI
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5ec3f5a43718..1874ac922166 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -138,7 +138,7 @@ static void kim_int_recv(struct kim_data_s *kim_gdata,
const unsigned char *data, long count)
{
const unsigned char *ptr;
- int len = 0, type = 0;
+ int len = 0;
unsigned char *plen;
pr_debug("%s", __func__);
@@ -183,7 +183,6 @@ static void kim_int_recv(struct kim_data_s *kim_gdata,
case 0x04:
kim_gdata->rx_state = ST_W4_HEADER;
kim_gdata->rx_count = 2;
- type = *ptr;
break;
default:
pr_info("unknown packet");
@@ -756,14 +755,14 @@ static int kim_probe(struct platform_device *pdev)
err = gpio_request(kim_gdata->nshutdown, "kim");
if (unlikely(err)) {
pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}
/* Configure nShutdown GPIO as output=0 */
err = gpio_direction_output(kim_gdata->nshutdown, 0);
if (unlikely(err)) {
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}
/* get reference of pdev for request_firmware
*/
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index adf46072cb37..3fce3b6a3624 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
} else
lux = 0;
else
- return -EAGAIN;
+ return 0;
/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 80a6f199077c..6c3591cdf855 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -258,13 +258,9 @@ static int vexpress_syscfg_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&syscfg->funcs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), pdev->name))
- return -EBUSY;
-
- syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!syscfg->base)
- return -EFAULT;
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
/* Must use dev.parent (MFD), as that's where DT phandle points at... */
bridge = vexpress_config_bridge_register(pdev->dev.parent,
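devm_ioremap_resource() above folds the request_mem_region + ioremap pair into one managed call; it also reports failure via an ERR_PTR-encoded errno rather than NULL, which is why the probe now uses IS_ERR()/PTR_ERR() instead of hand-picked -EBUSY or -EFAULT. The idiom as a fragment:

    void __iomem *base;

    res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
            return PTR_ERR(base);  /* -EBUSY, -EINVAL, ... from the core */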
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 56c6f79a5c5a..2543ef1ece17 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* VMware Balloon driver.
*
- * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License and no later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Maintained by: Xavier Deguillard <xdeguillard@vmware.com>
- * Philip Moltmann <moltmann@vmware.com>
- */
-
-/*
* This is VMware physical memory management driver for Linux. The driver
* acts like a "balloon" that can be inflated to reclaim physical pages by
* reserving them in the guest and invalidating them in the monitor,
@@ -55,25 +37,6 @@ MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
- * Various constants controlling rate of inflaint/deflating balloon,
- * measured in pages.
- */
-
-/*
- * Rates of memory allocaton when guest experiences memory pressure
- * (driver performs sleeping allocations).
- */
-#define VMW_BALLOON_RATE_ALLOC_MIN 512U
-#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
-#define VMW_BALLOON_RATE_ALLOC_INC 16U
-
-/*
- * When guest is under memory pressure, use a reduced page allocation
- * rate for next several cycles.
- */
-#define VMW_BALLOON_SLOW_CYCLES 4
-
-/*
* Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
* allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
* __GFP_NOWARN, to suppress page allocation failure warnings.
@@ -284,12 +247,6 @@ struct vmballoon {
/* reset flag */
bool reset_required;
- /* adjustment rates (pages per second) */
- unsigned int rate_alloc;
-
- /* slowdown page allocations for next few cycles */
- unsigned int slow_allocation_cycles;
-
unsigned long capabilities;
struct vmballoon_batch_page *batch_page;
@@ -341,7 +298,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
success = false;
}
- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
+ /*
+ * 2MB pages are only supported with batching. If batching is for some
+ * reason disabled, do not use 2MB pages, since otherwise the legacy
+ * mechanism is used with 2MB pages, causing a failure.
+ */
+ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
+ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
b->supported_page_sizes = 2;
else
b->supported_page_sizes = 1;
@@ -450,7 +413,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pfn32 = (u32)pfn;
if (pfn32 != pfn)
- return -1;
+ return -EINVAL;
STATS_INC(b->stats.lock[false]);
@@ -460,7 +423,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail[false]);
- return 1;
+ return -EIO;
}
static int vmballoon_send_batched_lock(struct vmballoon *b,
@@ -597,11 +560,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
target);
- if (locked > 0) {
+ if (locked) {
STATS_INC(b->stats.refused_alloc[false]);
- if (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+ if (locked == -EIO &&
+ (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
vmballoon_free_page(page, false);
return -EIO;
}
@@ -617,7 +581,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
} else {
vmballoon_free_page(page, false);
}
- return -EIO;
+ return locked;
}
/* track allocated page */
@@ -790,8 +754,6 @@ static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
*/
static void vmballoon_inflate(struct vmballoon *b)
{
- unsigned rate;
- unsigned int allocations = 0;
unsigned int num_pages = 0;
int error = 0;
gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
@@ -818,17 +780,9 @@ static void vmballoon_inflate(struct vmballoon *b)
* Start with no sleep allocation rate which may be higher
* than sleeping allocation rate.
*/
- if (b->slow_allocation_cycles) {
- rate = b->rate_alloc;
- is_2m_pages = false;
- } else {
- rate = UINT_MAX;
- is_2m_pages =
- b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
- }
+ is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
- pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
- __func__, b->target - b->size, rate, b->rate_alloc);
+ pr_debug("%s - goal: %d", __func__, b->target - b->size);
while (!b->reset_required &&
b->size + num_pages * vmballoon_page_size(is_2m_pages)
@@ -861,31 +815,24 @@ static void vmballoon_inflate(struct vmballoon *b)
if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
/*
* CANSLEEP page allocation failed, so guest
- * is under severe memory pressure. Quickly
- * decrease allocation rate.
+ * is under severe memory pressure. We just log
+ * the event, but do not stop the inflation
+ * due to its negative impact on performance.
*/
- b->rate_alloc = max(b->rate_alloc / 2,
- VMW_BALLOON_RATE_ALLOC_MIN);
STATS_INC(b->stats.sleep_alloc_fail);
break;
}
/*
* NOSLEEP page allocation failed, so the guest is
- * under memory pressure. Let us slow down page
- * allocations for next few cycles so that the guest
- * gets out of memory pressure. Also, if we already
- * allocated b->rate_alloc pages, let's pause,
- * otherwise switch to sleeping allocations.
+ * under memory pressure. Slowing down page allocations
+ * seems to be reasonable, but doing so might actually
+ * cause the hypervisor to throttle us down, resulting
+ * in degraded performance. We will count on the
+ * scheduler and standard memory management mechanisms
+ * for now.
*/
- b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
-
- if (allocations >= b->rate_alloc)
- break;
-
flags = VMW_PAGE_ALLOC_CANSLEEP;
- /* Lower rate for sleeping allocations. */
- rate = b->rate_alloc;
continue;
}
@@ -899,28 +846,11 @@ static void vmballoon_inflate(struct vmballoon *b)
}
cond_resched();
-
- if (allocations >= rate) {
- /* We allocated enough pages, let's take a break. */
- break;
- }
}
if (num_pages > 0)
b->ops->lock(b, num_pages, is_2m_pages, &b->target);
- /*
- * We reached our goal without failures so try increasing
- * allocation rate.
- */
- if (error == 0 && allocations >= b->rate_alloc) {
- unsigned int mult = allocations / b->rate_alloc;
-
- b->rate_alloc =
- min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
- VMW_BALLOON_RATE_ALLOC_MAX);
- }
-
vmballoon_release_refused_pages(b, true);
vmballoon_release_refused_pages(b, false);
}
@@ -1029,29 +959,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
- int error = 0;
+ unsigned long error, dummy;
- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
- error = vmci_doorbell_create(&b->vmci_doorbell,
- VMCI_FLAG_DELAYED_CB,
- VMCI_PRIVILEGE_FLAG_RESTRICTED,
- vmballoon_doorbell, b);
-
- if (error == VMCI_SUCCESS) {
- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
- b->vmci_doorbell.context,
- b->vmci_doorbell.resource, error);
- STATS_INC(b->stats.doorbell_set);
- }
- }
+ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+ return 0;
- if (error != 0) {
- vmballoon_vmci_cleanup(b);
+ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+ VMCI_PRIVILEGE_FLAG_RESTRICTED,
+ vmballoon_doorbell, b);
- return -EIO;
- }
+ if (error != VMCI_SUCCESS)
+ goto fail;
+
+ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
+ b->vmci_doorbell.resource, dummy);
+
+ STATS_INC(b->stats.doorbell_set);
+
+ if (error != VMW_BALLOON_SUCCESS)
+ goto fail;
return 0;
+fail:
+ vmballoon_vmci_cleanup(b);
+ return -EIO;
}
/*
@@ -1114,9 +1045,6 @@ static void vmballoon_work(struct work_struct *work)
if (b->reset_required)
vmballoon_reset(b);
- if (b->slow_allocation_cycles > 0)
- b->slow_allocation_cycles--;
-
if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
/* update target, adjust size */
b->target = target;
@@ -1160,11 +1088,6 @@ static int vmballoon_debug_show(struct seq_file *f, void *offset)
"current: %8d pages\n",
b->target, b->size);
- /* format rate info */
- seq_printf(f,
- "rateSleepAlloc: %8d pages/sec\n",
- b->rate_alloc);
-
seq_printf(f,
"\n"
"timer: %8u\n"
@@ -1271,9 +1194,6 @@ static int __init vmballoon_init(void)
INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
}
- /* initialize rates */
- balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
-
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
error = vmballoon_debugfs_init(&balloon);
@@ -1289,7 +1209,14 @@ static int __init vmballoon_init(void)
return 0;
}
-module_init(vmballoon_init);
+
+/*
+ * Using late_initcall() instead of module_init() allows the balloon to use the
+ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
+ * VMCI is probed only after the balloon is initialized. If the balloon is used
+ * as a module, late_initcall() is equivalent to module_init().
+ */
+late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index b4d7774cfe07..bd52f29b4a4e 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -668,7 +668,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
- if (retval < produce_q->kernel_if->num_pages) {
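+	/* Cast so a negative return value is not promoted to unsigned */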
+ if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -680,7 +680,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
- if (retval < consume_q->kernel_if->num_pages) {
+ if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
@@ -2214,7 +2214,6 @@ int vmci_qp_broker_map(struct vmci_handle handle,
{
struct qp_broker_entry *entry;
const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = false;
int result;
if (vmci_handle_is_invalid(handle) || !context ||
@@ -2243,7 +2242,6 @@ int vmci_qp_broker_map(struct vmci_handle handle,
goto out;
}
- is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
result = VMCI_SUCCESS;
if (context_id != VMCI_HOST_CONTEXT_ID) {
@@ -2325,7 +2323,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
{
struct qp_broker_entry *entry;
const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = false;
int result;
if (vmci_handle_is_invalid(handle) || !context ||
@@ -2354,8 +2351,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
goto out;
}
- is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
-
if (context_id != VMCI_HOST_CONTEXT_ID) {
qp_acquire_queue_mutex(entry->produce_q);
result = qp_save_headers(entry);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
index 5c271077ac87..489af7bc005a 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "brcmnand.h"
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 6241678e99af..7659d6c5f718 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -21,6 +21,16 @@ config MUX_ADG792A
To compile the driver as a module, choose M here: the module will
be called mux-adg792a.
+config MUX_ADGS1408
+ tristate "Analog Devices ADGS1408/ADGS1409 Multiplexers"
+ depends on SPI
+ help
+	  ADGS1408 8:1 multiplexer and ADGS1409 dual 4:1 multiplexer
+	  switches.
+
+ To compile the driver as a module, choose M here: the module will
+ be called mux-adgs1408.
+
config MUX_GPIO
tristate "GPIO-controlled Multiplexer"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/mux/Makefile b/drivers/mux/Makefile
index c3d883955fd5..6e9fa47daf56 100644
--- a/drivers/mux/Makefile
+++ b/drivers/mux/Makefile
@@ -5,10 +5,12 @@
mux-core-objs := core.o
mux-adg792a-objs := adg792a.o
+mux-adgs1408-objs := adgs1408.o
mux-gpio-objs := gpio.o
mux-mmio-objs := mmio.o
obj-$(CONFIG_MULTIPLEXER) += mux-core.o
obj-$(CONFIG_MUX_ADG792A) += mux-adg792a.o
+obj-$(CONFIG_MUX_ADGS1408) += mux-adgs1408.o
obj-$(CONFIG_MUX_GPIO) += mux-gpio.o
obj-$(CONFIG_MUX_MMIO) += mux-mmio.o
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
new file mode 100644
index 000000000000..0f7cf54e3234
--- /dev/null
+++ b/drivers/mux/adgs1408.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ADGS1408/ADGS1409 SPI MUX driver
+ *
+ * Copyright 2018 Analog Devices Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mux/driver.h>
+#include <linux/of_platform.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#define ADGS1408_SW_DATA (0x01)
+#define ADGS1408_REG_READ(reg) ((reg) | 0x80)
+#define ADGS1408_DISABLE (0x00)
+#define ADGS1408_MUX(state) (((state) << 1) | 1)
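+
+/*
+ * As encoded by the macros above, bit 0 of the SW_DATA register enables
+ * the switch and bits [3:1] select the channel, so ADGS1408_MUX(state)
+ * routes the requested input and ADGS1408_DISABLE switches it off.
+ */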
+
+enum adgs1408_chip_id {
+ ADGS1408 = 1,
+ ADGS1409,
+};
+
+static int adgs1408_spi_reg_write(struct spi_device *spi,
+ u8 reg_addr, u8 reg_data)
+{
+ u8 tx_buf[2];
+
+ tx_buf[0] = reg_addr;
+ tx_buf[1] = reg_data;
+
+ return spi_write_then_read(spi, tx_buf, sizeof(tx_buf), NULL, 0);
+}
+
+static int adgs1408_set(struct mux_control *mux, int state)
+{
+ struct spi_device *spi = to_spi_device(mux->chip->dev.parent);
+ u8 reg;
+
+ if (state == MUX_IDLE_DISCONNECT)
+ reg = ADGS1408_DISABLE;
+ else
+ reg = ADGS1408_MUX(state);
+
+ return adgs1408_spi_reg_write(spi, ADGS1408_SW_DATA, reg);
+}
+
+static const struct mux_control_ops adgs1408_ops = {
+ .set = adgs1408_set,
+};
+
+static int adgs1408_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ enum adgs1408_chip_id chip_id;
+ struct mux_chip *mux_chip;
+ struct mux_control *mux;
+ s32 idle_state;
+ int ret;
+
+ chip_id = (enum adgs1408_chip_id)of_device_get_match_data(dev);
+ if (!chip_id)
+ chip_id = spi_get_device_id(spi)->driver_data;
+
+ mux_chip = devm_mux_chip_alloc(dev, 1, 0);
+ if (IS_ERR(mux_chip))
+ return PTR_ERR(mux_chip);
+
+ mux_chip->ops = &adgs1408_ops;
+
+ ret = adgs1408_spi_reg_write(spi, ADGS1408_SW_DATA, ADGS1408_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32(dev, "idle-state", (u32 *)&idle_state);
+ if (ret < 0)
+ idle_state = MUX_IDLE_AS_IS;
+
+ mux = mux_chip->mux;
+
+ if (chip_id == ADGS1408)
+ mux->states = 8;
+ else
+ mux->states = 4;
+
+ switch (idle_state) {
+ case MUX_IDLE_DISCONNECT:
+ case MUX_IDLE_AS_IS:
+ case 0 ... 7:
+ /* adgs1409 supports only 4 states */
+ if (idle_state < mux->states) {
+ mux->idle_state = idle_state;
+ break;
+ }
+ /* fall through */
+ default:
+ dev_err(dev, "invalid idle-state %d\n", idle_state);
+ return -EINVAL;
+ }
+
+ return devm_mux_chip_register(dev, mux_chip);
+}
+
+static const struct spi_device_id adgs1408_spi_id[] = {
+ { "adgs1408", ADGS1408 },
+ { "adgs1409", ADGS1409 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adgs1408_spi_id);
+
+static const struct of_device_id adgs1408_of_match[] = {
+ { .compatible = "adi,adgs1408", .data = (void *)ADGS1408, },
+ { .compatible = "adi,adgs1409", .data = (void *)ADGS1409, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adgs1408_of_match);
+
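+/*
+ * Illustrative device tree usage (a sketch only; it assumes the generic
+ * SPI and mux-controller bindings, and the property values are examples):
+ *
+ *	mux: mux-controller@0 {
+ *		compatible = "adi,adgs1408";
+ *		reg = <0>;
+ *		spi-max-frequency = <1000000>;
+ *		#mux-control-cells = <0>;
+ *		idle-state = <1>;
+ *	};
+ */
+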
+static struct spi_driver adgs1408_driver = {
+ .driver = {
+ .name = "adgs1408",
+ .of_match_table = of_match_ptr(adgs1408_of_match),
+ },
+ .probe = adgs1408_probe,
+ .id_table = adgs1408_spi_id,
+};
+module_spi_driver(adgs1408_driver);
+
+MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2c63afff1382..13741ee49b9b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -14,6 +14,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index aecc76504b69..a1197d3adbe0 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -29,6 +29,7 @@
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 20275d1e6f9a..507f68190cb1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2303,6 +2303,9 @@ static struct hv_driver netvsc_drv = {
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
/*
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2bd982c3a479..63019c3de034 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -19,6 +19,7 @@
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include "ath9k.h"
static const struct platform_device_id ath9k_platform_id_table[] = {
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 22009e14a8fc..4a4f797bb10f 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -20,6 +20,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/err.h>
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ca0f936fc119..496b9b63cea1 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -20,6 +20,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/firmware.h>
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 54a3c298247b..0a7a470ee859 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -181,4 +181,15 @@ config RAVE_SP_EEPROM
help
Say y here to enable Rave SP EEPROM support.
+config SC27XX_EFUSE
+ tristate "Spreadtrum SC27XX eFuse Support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to read specified values of Spreadtrum
+	  SC27XX PMICs from the eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sc27xx-efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 27e96a8efd1c..4e8c61628f1a 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -39,4 +39,5 @@ obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
nvmem_snvs_lpgpr-y := snvs_lpgpr.o
obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
-
+obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
+nvmem-sc27xx-efuse-y := sc27xx-efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 514d1dfc5630..aa1657831b70 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -31,7 +31,6 @@ struct nvmem_device {
struct device dev;
int stride;
int word_size;
- int ncells;
int id;
int users;
size_t size;
@@ -389,7 +388,6 @@ int nvmem_add_cells(struct nvmem_device *nvmem,
nvmem_cell_add(cells[i]);
}
- nvmem->ncells = ncells;
/* remove tmp array */
kfree(cells);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 60816c856dd6..afb429a417fe 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -409,6 +409,12 @@ static const struct ocotp_params imx6sl_params = {
.set_timing = imx_ocotp_set_imx6_timing,
};
+static const struct ocotp_params imx6sll_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
static const struct ocotp_params imx6sx_params = {
.nregs = 128,
.bank_address_words = 0,
@@ -433,6 +439,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
{ .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
+ { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index b1af966206a6..a9534a6e8636 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index e66adf17a747..58c998b2e3bc 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 4f650baad983..fbb1f1df6fc7 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
index 50aeea6ec6cc..66699d44f73d 100644
--- a/drivers/nvmem/rave-sp-eeprom.c
+++ b/drivers/nvmem/rave-sp-eeprom.c
@@ -35,6 +35,7 @@ enum rave_sp_eeprom_header_size {
RAVE_SP_EEPROM_HEADER_SMALL = 4U,
RAVE_SP_EEPROM_HEADER_BIG = 5U,
};
+#define RAVE_SP_EEPROM_HEADER_MAX RAVE_SP_EEPROM_HEADER_BIG
#define RAVE_SP_EEPROM_PAGE_SIZE 32U
@@ -97,9 +98,12 @@ static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom,
const unsigned int rsp_size =
is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page);
unsigned int offset = 0;
- u8 cmd[cmd_size];
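+	/* Fixed upper bound replaces the VLA; cmd_size is checked below */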
+ u8 cmd[RAVE_SP_EEPROM_HEADER_MAX + sizeof(page->data)];
int ret;
+ if (WARN_ON(cmd_size > sizeof(cmd)))
+ return -EINVAL;
+
cmd[offset++] = eeprom->address;
cmd[offset++] = 0;
cmd[offset++] = type;
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
new file mode 100644
index 000000000000..33185d8d82cf
--- /dev/null
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/hwspinlock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/nvmem-provider.h>
+
+/* PMIC global registers definition */
+#define SC27XX_MODULE_EN 0xc08
+#define SC27XX_EFUSE_EN BIT(6)
+
+/* Efuse controller registers definition */
+#define SC27XX_EFUSE_GLB_CTRL 0x0
+#define SC27XX_EFUSE_DATA_RD 0x4
+#define SC27XX_EFUSE_DATA_WR 0x8
+#define SC27XX_EFUSE_BLOCK_INDEX 0xc
+#define SC27XX_EFUSE_MODE_CTRL 0x10
+#define SC27XX_EFUSE_STATUS 0x14
+#define SC27XX_EFUSE_WR_TIMING_CTRL 0x20
+#define SC27XX_EFUSE_RD_TIMING_CTRL 0x24
+#define SC27XX_EFUSE_EFUSE_DEB_CTRL 0x28
+
+/* Mask definition for SC27XX_EFUSE_BLOCK_INDEX register */
+#define SC27XX_EFUSE_BLOCK_MASK GENMASK(4, 0)
+
+/* Bits definitions for SC27XX_EFUSE_MODE_CTRL register */
+#define SC27XX_EFUSE_PG_START BIT(0)
+#define SC27XX_EFUSE_RD_START BIT(1)
+#define SC27XX_EFUSE_CLR_RDDONE BIT(2)
+
+/* Bits definitions for SC27XX_EFUSE_STATUS register */
+#define SC27XX_EFUSE_PGM_BUSY BIT(0)
+#define SC27XX_EFUSE_READ_BUSY BIT(1)
+#define SC27XX_EFUSE_STANDBY BIT(2)
+#define SC27XX_EFUSE_GLOBAL_PROT BIT(3)
+#define SC27XX_EFUSE_RD_DONE BIT(4)
+
+/* Block number and block width (bytes) definitions */
+#define SC27XX_EFUSE_BLOCK_MAX 32
+#define SC27XX_EFUSE_BLOCK_WIDTH 2
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SC27XX_EFUSE_HWLOCK_TIMEOUT 5000
+
+/* Timeout (us) of polling the status */
+#define SC27XX_EFUSE_POLL_TIMEOUT 3000000
+#define SC27XX_EFUSE_POLL_DELAY_US 10000
+
+struct sc27xx_efuse {
+ struct device *dev;
+ struct regmap *regmap;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ u32 base;
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems access the single eFuse
+ * controller, so a hardware spinlock is needed to synchronize among
+ * them.
+ */
+static int sc27xx_efuse_lock(struct sc27xx_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SC27XX_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+		dev_err(efuse->dev, "timed out getting the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sc27xx_efuse_unlock(struct sc27xx_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read_poll_timeout(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_STATUS,
+ val, (val & bits),
+ SC27XX_EFUSE_POLL_DELAY_US,
+ SC27XX_EFUSE_POLL_TIMEOUT);
+ if (ret) {
+		dev_err(efuse->dev, "timed out waiting for the efuse status\n");
+ return ret;
+ }
+
+ return 0;
+}
+
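+/*
+ * Read one block (at most SC27XX_EFUSE_BLOCK_WIDTH bytes) from the eFuse:
+ * take the hardware spinlock, enable the controller, wait for standby,
+ * select the block, trigger the read, poll for RD_DONE, fetch the data
+ * word and clear the done flag before disabling the controller again.
+ */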
+static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sc27xx_efuse *efuse = context;
+ u32 buf;
+ int ret;
+
+ if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH)
+ return -EINVAL;
+
+ ret = sc27xx_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ /* Enable the efuse controller. */
+ ret = regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN,
+ SC27XX_EFUSE_EN, SC27XX_EFUSE_EN);
+ if (ret)
+ goto unlock_efuse;
+
+ /*
+ * Before reading, we should ensure the efuse controller is in
+ * standby state.
+ */
+ ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_STANDBY);
+ if (ret)
+ goto disable_efuse;
+
+ /* Set the block address to be read. */
+ ret = regmap_write(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
+ offset & SC27XX_EFUSE_BLOCK_MASK);
+ if (ret)
+ goto disable_efuse;
+
+ /* Start reading process from efuse memory. */
+ ret = regmap_update_bits(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_MODE_CTRL,
+ SC27XX_EFUSE_RD_START,
+ SC27XX_EFUSE_RD_START);
+ if (ret)
+ goto disable_efuse;
+
+ /*
+ * Polling the read done status to make sure the reading process
+ * is completed, that means the data can be read out now.
+ */
+ ret = sc27xx_efuse_poll_status(efuse, SC27XX_EFUSE_RD_DONE);
+ if (ret)
+ goto disable_efuse;
+
+ /* Read data from efuse memory. */
+ ret = regmap_read(efuse->regmap, efuse->base + SC27XX_EFUSE_DATA_RD,
+ &buf);
+ if (ret)
+ goto disable_efuse;
+
+ /* Clear the read done flag. */
+ ret = regmap_update_bits(efuse->regmap,
+ efuse->base + SC27XX_EFUSE_MODE_CTRL,
+ SC27XX_EFUSE_CLR_RDDONE,
+ SC27XX_EFUSE_CLR_RDDONE);
+
+disable_efuse:
+ /* Disable the efuse controller after reading. */
+ regmap_update_bits(efuse->regmap, SC27XX_MODULE_EN, SC27XX_EFUSE_EN, 0);
+unlock_efuse:
+ sc27xx_efuse_unlock(efuse);
+
+ if (!ret)
+ memcpy(val, &buf, bytes);
+
+ return ret;
+}
+
+static int sc27xx_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_config econfig = { };
+ struct nvmem_device *nvmem;
+ struct sc27xx_efuse *efuse;
+ int ret;
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!efuse->regmap) {
+ dev_err(&pdev->dev, "failed to get efuse regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &efuse->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get efuse base address\n");
+ return ret;
+ }
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwspinlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = hwspin_lock_request_specific(ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwspinlock\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ platform_set_drvdata(pdev, efuse);
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = true;
+ econfig.name = "sc27xx-efuse";
+ econfig.size = SC27XX_EFUSE_BLOCK_MAX * SC27XX_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sc27xx_efuse_read;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem config\n");
+ hwspin_lock_free(efuse->hwlock);
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static int sc27xx_efuse_remove(struct platform_device *pdev)
+{
+ struct sc27xx_efuse *efuse = platform_get_drvdata(pdev);
+
+ hwspin_lock_free(efuse->hwlock);
+ return 0;
+}
+
+static const struct of_device_id sc27xx_efuse_of_match[] = {
+ { .compatible = "sprd,sc2731-efuse" },
+ { }
+};
+
+static struct platform_driver sc27xx_efuse_driver = {
+ .probe = sc27xx_efuse_probe,
+ .remove = sc27xx_efuse_remove,
+ .driver = {
+ .name = "sc27xx-efuse",
+ .of_match_table = sc27xx_efuse_of_match,
+ },
+};
+
+module_platform_driver(sc27xx_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC27xx efuse driver");
+MODULE_LICENSE("GPL v2");
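+
+/*
+ * Illustrative consumer-side sketch (not part of this driver) using the
+ * generic nvmem cell API; "calib-data" is a hypothetical cell name:
+ *
+ *	struct nvmem_cell *cell = nvmem_cell_get(dev, "calib-data");
+ *	size_t len;
+ *	u8 *buf = nvmem_cell_read(cell, &len);
+ *	...
+ *	kfree(buf);
+ *	nvmem_cell_put(cell);
+ */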
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
index 271f0b2ff86a..286910336ef6 100644
--- a/drivers/nvmem/uniphier-efuse.c
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 2d1a5c737c6e..f12b9da69255 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -267,7 +267,7 @@ static void parport_ieee1284_terminate (struct parport *port)
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
- /* fall-though.. */
+ /* fall through */
default:
/* Terminate from all other modes. */
@@ -615,6 +615,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
parport_negotiate (port, IEEE1284_MODE_COMPAT);
+ /* fall through */
case IEEE1284_MODE_COMPAT:
DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
port->name);
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 01cf1c1a841a..8de329546b82 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -286,12 +286,16 @@ static int bpp_probe(struct platform_device *op)
ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations),
GFP_KERNEL);
- if (!ops)
+ if (!ops) {
+ err = -ENOMEM;
goto out_unmap;
+ }
dprintk(("register_port\n"));
- if (!(p = parport_register_port((unsigned long)base, irq, dma, ops)))
+ if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) {
+ err = -ENOMEM;
goto out_free_ops;
+ }
p->size = size;
p->dev = &op->dev;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 4b15c36f4631..7dd850e02f19 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 4a916be44f4f..4fa69f988c7b 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 9cc80a500880..2b1a61dba224 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
index 3cdad8bc8f93..5702b6704137 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "pinctrl-sprd.h"
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index bce533f85420..280dca725d6e 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index 99f06fe8e1cb..d2d56c985c83 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
index b247011524bf..03d87ad82726 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
index cb58797adaee..31f36ea53911 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index 89148f81d5e0..60722898d5c7 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
index d77d6b37aabe..ae7981530141 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
index 90199da87eb9..7975bd7f99c8 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
index 3b860da47733..b16ce283695b 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
index f086083368a7..cb44568fcbbc 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index fefbb8370da0..479031aa4f88 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -10,11 +10,6 @@ menuconfig GOLDFISH
if GOLDFISH
-config GOLDFISH_BUS
- bool "Goldfish platform bus"
- ---help---
- This is a virtual bus to host Goldfish Android Virtual Devices.
-
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
---help---
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index d3487125838c..e0c202df9674 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -1,5 +1,4 @@
#
# Makefile for Goldfish platform specific drivers
#
-obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o
obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 3e32a4c14d5f..2da567540c2d 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -48,6 +48,7 @@
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -645,7 +646,7 @@ static void goldfish_interrupt_task(unsigned long unused)
wake_up_interruptible(&pipe->wake_queue);
}
}
-DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
+static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
/*
* The general idea of the interrupt handling:
diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
deleted file mode 100644
index dd9ea463c2a4..000000000000
--- a/drivers/platform/goldfish/pdev_bus.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (C) 2011 Intel, Inc.
- * Copyright (C) 2013 Intel, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#define PDEV_BUS_OP_DONE (0x00)
-#define PDEV_BUS_OP_REMOVE_DEV (0x04)
-#define PDEV_BUS_OP_ADD_DEV (0x08)
-
-#define PDEV_BUS_OP_INIT (0x00)
-
-#define PDEV_BUS_OP (0x00)
-#define PDEV_BUS_GET_NAME (0x04)
-#define PDEV_BUS_NAME_LEN (0x08)
-#define PDEV_BUS_ID (0x0c)
-#define PDEV_BUS_IO_BASE (0x10)
-#define PDEV_BUS_IO_SIZE (0x14)
-#define PDEV_BUS_IRQ (0x18)
-#define PDEV_BUS_IRQ_COUNT (0x1c)
-#define PDEV_BUS_GET_NAME_HIGH (0x20)
-
-struct pdev_bus_dev {
- struct list_head list;
- struct platform_device pdev;
- struct resource resources[0];
-};
-
-static void goldfish_pdev_worker(struct work_struct *work);
-
-static void __iomem *pdev_bus_base;
-static unsigned long pdev_bus_addr;
-static unsigned long pdev_bus_len;
-static u32 pdev_bus_irq;
-static LIST_HEAD(pdev_bus_new_devices);
-static LIST_HEAD(pdev_bus_registered_devices);
-static LIST_HEAD(pdev_bus_removed_devices);
-static DECLARE_WORK(pdev_bus_worker, goldfish_pdev_worker);
-
-
-static void goldfish_pdev_worker(struct work_struct *work)
-{
- int ret;
- struct pdev_bus_dev *pos, *n;
-
- list_for_each_entry_safe(pos, n, &pdev_bus_removed_devices, list) {
- list_del(&pos->list);
- platform_device_unregister(&pos->pdev);
- kfree(pos);
- }
- list_for_each_entry_safe(pos, n, &pdev_bus_new_devices, list) {
- list_del(&pos->list);
- ret = platform_device_register(&pos->pdev);
- if (ret)
- pr_err("goldfish_pdev_worker failed to register device, %s\n",
- pos->pdev.name);
- list_add_tail(&pos->list, &pdev_bus_registered_devices);
- }
-}
-
-static void goldfish_pdev_remove(void)
-{
- struct pdev_bus_dev *pos, *n;
- u32 base;
-
- base = readl(pdev_bus_base + PDEV_BUS_IO_BASE);
-
- list_for_each_entry_safe(pos, n, &pdev_bus_new_devices, list) {
- if (pos->resources[0].start == base) {
- list_del(&pos->list);
- kfree(pos);
- return;
- }
- }
- list_for_each_entry_safe(pos, n, &pdev_bus_registered_devices, list) {
- if (pos->resources[0].start == base) {
- list_del(&pos->list);
- list_add_tail(&pos->list, &pdev_bus_removed_devices);
- schedule_work(&pdev_bus_worker);
- return;
- }
- };
- pr_err("goldfish_pdev_remove could not find device at %x\n", base);
-}
-
-static int goldfish_new_pdev(void)
-{
- struct pdev_bus_dev *dev;
- u32 name_len;
- u32 irq = -1, irq_count;
- int resource_count = 2;
- u32 base;
- char *name;
-
- base = readl(pdev_bus_base + PDEV_BUS_IO_BASE);
-
- irq_count = readl(pdev_bus_base + PDEV_BUS_IRQ_COUNT);
- name_len = readl(pdev_bus_base + PDEV_BUS_NAME_LEN);
- if (irq_count)
- resource_count++;
-
- dev = kzalloc(sizeof(*dev) +
- sizeof(struct resource) * resource_count +
- name_len + 1 + sizeof(*dev->pdev.dev.dma_mask), GFP_ATOMIC);
- if (dev == NULL)
- return -ENOMEM;
-
- dev->pdev.num_resources = resource_count;
- dev->pdev.resource = (struct resource *)(dev + 1);
- dev->pdev.name = name = (char *)(dev->pdev.resource + resource_count);
- dev->pdev.dev.coherent_dma_mask = ~0;
- dev->pdev.dev.dma_mask = (void *)(dev->pdev.name + name_len + 1);
- *dev->pdev.dev.dma_mask = ~0;
-
-#ifdef CONFIG_64BIT
- writel((u32)((u64)name>>32), pdev_bus_base + PDEV_BUS_GET_NAME_HIGH);
-#endif
- writel((u32)(unsigned long)name, pdev_bus_base + PDEV_BUS_GET_NAME);
- name[name_len] = '\0';
- dev->pdev.id = readl(pdev_bus_base + PDEV_BUS_ID);
- dev->pdev.resource[0].start = base;
- dev->pdev.resource[0].end = base +
- readl(pdev_bus_base + PDEV_BUS_IO_SIZE) - 1;
- dev->pdev.resource[0].flags = IORESOURCE_MEM;
- if (irq_count) {
- irq = readl(pdev_bus_base + PDEV_BUS_IRQ);
- dev->pdev.resource[1].start = irq;
- dev->pdev.resource[1].end = irq + irq_count - 1;
- dev->pdev.resource[1].flags = IORESOURCE_IRQ;
- }
-
- pr_debug("goldfish_new_pdev %s at %x irq %d\n", name, base, irq);
- list_add_tail(&dev->list, &pdev_bus_new_devices);
- schedule_work(&pdev_bus_worker);
-
- return 0;
-}
-
-static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
-{
- irqreturn_t ret = IRQ_NONE;
-
- while (1) {
- u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
-
- switch (op) {
- case PDEV_BUS_OP_REMOVE_DEV:
- goldfish_pdev_remove();
- ret = IRQ_HANDLED;
- break;
-
- case PDEV_BUS_OP_ADD_DEV:
- goldfish_new_pdev();
- ret = IRQ_HANDLED;
- break;
-
- case PDEV_BUS_OP_DONE:
- default:
- return ret;
- }
- }
-}
-
-static int goldfish_pdev_bus_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL)
- return -EINVAL;
-
- pdev_bus_addr = r->start;
- pdev_bus_len = resource_size(r);
-
- pdev_bus_base = ioremap(pdev_bus_addr, pdev_bus_len);
- if (pdev_bus_base == NULL) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "unable to map Goldfish MMIO.\n");
- goto free_resources;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (r == NULL) {
- ret = -ENOENT;
- goto free_map;
- }
-
- pdev_bus_irq = r->start;
-
- ret = request_irq(pdev_bus_irq, goldfish_pdev_bus_interrupt,
- IRQF_SHARED, "goldfish_pdev_bus", pdev);
- if (ret) {
- dev_err(&pdev->dev, "unable to request Goldfish IRQ\n");
- goto free_map;
- }
-
- writel(PDEV_BUS_OP_INIT, pdev_bus_base + PDEV_BUS_OP);
- return 0;
-
-free_map:
- iounmap(pdev_bus_base);
-free_resources:
- release_mem_region(pdev_bus_addr, pdev_bus_len);
- return ret;
-}
-
-static struct platform_driver goldfish_pdev_bus_driver = {
- .probe = goldfish_pdev_bus_probe,
- .driver = {
- .name = "goldfish_pdev_bus"
- }
-};
-builtin_platform_driver(goldfish_pdev_bus_driver);
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c
index ea865d4ca220..227943a20212 100644
--- a/drivers/platform/x86/intel_bxtwc_tmu.c
+++ b/drivers/platform/x86/intel_bxtwc_tmu.c
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_soc_pmic.h>
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index cb0237143dbe..1360a7fa542c 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -18,6 +18,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index bfcd6fba6363..6b911b6b10a6 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -62,6 +62,7 @@
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/reboot.h>
diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index b64cf0f14142..cad7d1a8feec 100644
--- a/drivers/power/supply/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 3bc2eea7b3b7..6da79ae14860 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/device.h>
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 6edd3b9c7f01..a7dc43368df4 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index a4921a70da55..276faeddc370 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -18,6 +18,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index 2674880e5492..a7455916e396 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reboot.h>
diff --git a/drivers/reset/reset-axs10x.c b/drivers/reset/reset-axs10x.c
index afb298e46bd9..a854ef41364d 100644
--- a/drivers/reset/reset-axs10x.c
+++ b/drivers/reset/reset-axs10x.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 4db177bc89bc..14bc78d28707 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -16,6 +16,7 @@
*/
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/regmap.h>
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 2fc517498a5d..fc5cf5c44ae7 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -8,6 +8,7 @@
*/
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/rtc.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index a8856f2b9bc2..6b477174a82f 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -24,6 +24,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index 61f798c6101f..8f1dd88fa827 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/clk.h>
#define DRV_NAME "rtc-ftrtc010"
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 1f892b238ddb..0fa33708fc49 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index c75f26dc8fcc..007879a5042d 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
index 169704b2ce13..1943c8151152 100644
--- a/drivers/rtc/rtc-r7301.c
+++ b/drivers/rtc/rtc-r7301.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 4f98543d1ea5..776b70a14e03 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -15,6 +15,7 @@
* for more details.
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 66efff60c4d5..8dc48fe7fc35 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtc.h>
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 33a4a4dad324..f03dc03a42c3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1935,6 +1935,9 @@ static struct hv_driver storvsc_drv = {
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index ea7ef982968b..46b4cda36bac 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -5,6 +5,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index 16590dfaafa4..f8c08fb9891d 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -215,26 +215,26 @@ static void siox_poll(struct siox_master *smaster)
siox_status_clean(status,
sdevice->status_written_lastcycle);
- /* Check counter bits */
- if (siox_device_counter_error(sdevice, status_clean)) {
- bool prev_counter_error;
+ /* Check counter and type bits */
+ if (siox_device_counter_error(sdevice, status_clean) ||
+ siox_device_type_error(sdevice, status_clean)) {
+ bool prev_error;
synced = false;
/* only report a new error if the last cycle was ok */
- prev_counter_error =
+ prev_error =
siox_device_counter_error(sdevice,
- prev_status_clean);
- if (!prev_counter_error) {
+ prev_status_clean) ||
+ siox_device_type_error(sdevice,
+ prev_status_clean);
+
+ if (!prev_error) {
sdevice->status_errors++;
sysfs_notify_dirent(sdevice->status_errors_kn);
}
}
- /* Check type bits */
- if (siox_device_type_error(sdevice, status_clean))
- synced = false;
-
/* If the device is unsynced report the watchdog as active */
if (!synced) {
status &= ~SIOX_STATUS_WDG;
@@ -715,17 +715,17 @@ int siox_master_register(struct siox_master *smaster)
dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
+ mutex_init(&smaster->lock);
+ INIT_LIST_HEAD(&smaster->devices);
+
smaster->last_poll = jiffies;
- smaster->poll_thread = kthread_create(siox_poll_thread, smaster,
- "siox-%d", smaster->busno);
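+	/* kthread_run() creates the polling thread and wakes it immediately */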
+ smaster->poll_thread = kthread_run(siox_poll_thread, smaster,
+ "siox-%d", smaster->busno);
if (IS_ERR(smaster->poll_thread)) {
smaster->active = 0;
return PTR_ERR(smaster->poll_thread);
}
- mutex_init(&smaster->lock);
- INIT_LIST_HEAD(&smaster->devices);
-
ret = device_add(&smaster->dev);
if (ret)
kthread_stop(smaster->poll_thread);
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index 1a632fad597e..9d73ad806698 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -15,10 +15,20 @@ if SLIMBUS
# SLIMbus controllers
config SLIM_QCOM_CTRL
tristate "Qualcomm SLIMbus Manager Component"
- depends on SLIMBUS
depends on HAS_IOMEM
help
Select driver if Qualcomm's SLIMbus Manager Component is
programmed using Linux kernel.
+config SLIM_QCOM_NGD_CTRL
+ tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
+ depends on QCOM_QMI_HELPERS
+ depends on HAS_IOMEM && DMA_ENGINE
+ help
+	  Select this driver if Qualcomm's SLIMbus Satellite Non-Generic
+	  Device Component is programmed using the Linux kernel.
+	  This is a lightweight SLIMbus controller driver responsible for
+	  communicating with slave HW directly over the bus using the
+	  messaging interface, and with the master component residing on
+	  the ADSP for bandwidth and data-channel management.
endif
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
index a35a3da4eb78..d9aa011b6804 100644
--- a/drivers/slimbus/Makefile
+++ b/drivers/slimbus/Makefile
@@ -3,8 +3,11 @@
# Makefile for kernel SLIMbus framework.
#
obj-$(CONFIG_SLIMBUS) += slimbus.o
-slimbus-y := core.o messaging.o sched.o
+slimbus-y := core.o messaging.o sched.o stream.o
#Controllers
obj-$(CONFIG_SLIM_QCOM_CTRL) += slim-qcom-ctrl.o
slim-qcom-ctrl-y := qcom-ctrl.o
+
+obj-$(CONFIG_SLIM_QCOM_NGD_CTRL) += slim-qcom-ngd-ctrl.o
+slim-qcom-ngd-ctrl-y := qcom-ngd-ctrl.o
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 7ddfc675b131..95b00d28ad6e 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -114,6 +114,8 @@ static int slim_add_device(struct slim_controller *ctrl,
sbdev->dev.release = slim_dev_release;
sbdev->dev.driver = NULL;
sbdev->ctrl = ctrl;
+ INIT_LIST_HEAD(&sbdev->stream_list);
+ spin_lock_init(&sbdev->stream_list_lock);
if (node)
sbdev->dev.of_node = of_node_get(node);
@@ -356,6 +358,45 @@ struct slim_device *slim_get_device(struct slim_controller *ctrl,
}
EXPORT_SYMBOL_GPL(slim_get_device);
+static int of_slim_match_dev(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+ struct slim_device *sbdev = to_slim_device(dev);
+
+ return (sbdev->dev.of_node == np);
+}
+
+static struct slim_device *of_find_slim_device(struct slim_controller *ctrl,
+ struct device_node *np)
+{
+ struct slim_device *sbdev;
+ struct device *dev;
+
+ dev = device_find_child(ctrl->dev, np, of_slim_match_dev);
+ if (dev) {
+ sbdev = to_slim_device(dev);
+ return sbdev;
+ }
+
+ return NULL;
+}
+
+/**
+ * of_slim_get_device() - get handle to a device using dt node.
+ *
+ * @ctrl: Controller on which this device will be added/queried
+ * @np: node pointer to device
+ *
+ * Return: pointer to the slim device if it has already been reported,
+ * otherwise NULL.
+ */
+struct slim_device *of_slim_get_device(struct slim_controller *ctrl,
+ struct device_node *np)
+{
+ return of_find_slim_device(ctrl, np);
+}
+EXPORT_SYMBOL_GPL(of_slim_get_device);
+
static int slim_device_alloc_laddr(struct slim_device *sbdev,
bool report_present)
{
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 457ea1f8db30..d5879142dbef 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -29,22 +29,19 @@ void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
spin_lock_irqsave(&ctrl->txn_lock, flags);
txn = idr_find(&ctrl->tid_idr, tid);
- if (txn == NULL) {
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+
+ if (txn == NULL)
return;
- }
msg = txn->msg;
if (msg == NULL || msg->rbuf == NULL) {
dev_err(ctrl->dev, "Got response to invalid TID:%d, len:%d\n",
tid, len);
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
return;
}
- idr_remove(&ctrl->tid_idr, tid);
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
-
+ slim_free_txn_tid(ctrl, txn);
memcpy(msg->rbuf, reply, len);
if (txn->comp)
complete(txn->comp);
@@ -56,6 +53,48 @@ void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
EXPORT_SYMBOL_GPL(slim_msg_response);
/**
+ * slim_alloc_txn_tid() - Allocate a tid for a txn
+ *
+ * @ctrl: Controller handle
+ * @txn: transaction to be allocated with tid.
+ *
+ * Return: zero on success with a valid txn->tid, or an error code on failure.
+ */
+int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
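+	/*
+	 * Cyclic allocation hands out TIDs round-robin, so a just-freed
+	 * TID is not immediately handed to a new transaction while a late
+	 * response for the old one may still arrive.
+	 */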
+ ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+ SLIM_MAX_TIDS, GFP_ATOMIC);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return ret;
+ }
+ txn->tid = ret;
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_alloc_txn_tid);
+
+/**
+ * slim_free_txn_tid() - Free the tid of a txn
+ *
+ * @ctrl: Controller handle
+ * @txn: transaction whose tid should be freed
+ */
+void slim_free_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->txn_lock, flags);
+ idr_remove(&ctrl->tid_idr, txn->tid);
+ spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+}
+EXPORT_SYMBOL_GPL(slim_free_txn_tid);
+
+/**
* slim_do_transfer() - Process a SLIMbus-messaging transaction
*
* @ctrl: Controller handle
@@ -72,8 +111,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
bool need_tid = false, clk_pause_msg = false;
- unsigned long flags;
- int ret, tid, timeout;
+ int ret, timeout;
/*
* do not vote for runtime-PM if the transactions are part of clock
@@ -97,34 +135,26 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
need_tid = slim_tid_txn(txn->mt, txn->mc);
if (need_tid) {
- spin_lock_irqsave(&ctrl->txn_lock, flags);
- tid = idr_alloc(&ctrl->tid_idr, txn, 0,
- SLIM_MAX_TIDS, GFP_ATOMIC);
- txn->tid = tid;
+ ret = slim_alloc_txn_tid(ctrl, txn);
+ if (ret)
+ return ret;
if (!txn->msg->comp)
txn->comp = &done;
else
			txn->comp = txn->msg->comp;
-
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
-
- if (tid < 0)
- return tid;
}
ret = ctrl->xfer_msg(ctrl, txn);
- if (ret && need_tid && !txn->msg->comp) {
+ if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
timeout = wait_for_completion_timeout(txn->comp,
msecs_to_jiffies(ms));
if (!timeout) {
ret = -ETIMEDOUT;
- spin_lock_irqsave(&ctrl->txn_lock, flags);
- idr_remove(&ctrl->tid_idr, tid);
- spin_unlock_irqrestore(&ctrl->txn_lock, flags);
+ slim_free_txn_tid(ctrl, txn);
}
}
@@ -139,7 +169,7 @@ slim_xfer_err:
* if there was error during this transaction
*/
pm_runtime_mark_last_busy(ctrl->dev);
- pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
}
return ret;
}
@@ -246,6 +276,7 @@ static void slim_fill_msg(struct slim_val_inf *msg, u32 addr,
msg->num_bytes = count;
msg->rbuf = rbuf;
msg->wbuf = wbuf;
+ msg->comp = NULL;
}
/**
@@ -307,7 +338,7 @@ int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
{
struct slim_val_inf msg;
- slim_fill_msg(&msg, addr, count, val, NULL);
+ slim_fill_msg(&msg, addr, count, NULL, val);
return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_CHANGE_VALUE);
}
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
new file mode 100644
index 000000000000..8be4d6786c61
--- /dev/null
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/slimbus.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/soc/qcom/qmi.h>
+#include <net/sock.h>
+#include "slimbus.h"
+
+/* NGD (Non-ported Generic Device) registers */
+#define NGD_CFG 0x0
+#define NGD_CFG_ENABLE BIT(0)
+#define NGD_CFG_RX_MSGQ_EN BIT(1)
+#define NGD_CFG_TX_MSGQ_EN BIT(2)
+#define NGD_STATUS 0x4
+#define NGD_LADDR BIT(1)
+#define NGD_RX_MSGQ_CFG 0x8
+#define NGD_INT_EN 0x10
+#define NGD_INT_RECFG_DONE BIT(24)
+#define NGD_INT_TX_NACKED_2 BIT(25)
+#define NGD_INT_MSG_BUF_CONTE BIT(26)
+#define NGD_INT_MSG_TX_INVAL BIT(27)
+#define NGD_INT_IE_VE_CHG BIT(28)
+#define NGD_INT_DEV_ERR BIT(29)
+#define NGD_INT_RX_MSG_RCVD BIT(30)
+#define NGD_INT_TX_MSG_SENT BIT(31)
+#define NGD_INT_STAT 0x14
+#define NGD_INT_CLR 0x18
+#define DEF_NGD_INT_MASK (NGD_INT_TX_NACKED_2 | NGD_INT_MSG_BUF_CONTE | \
+ NGD_INT_MSG_TX_INVAL | NGD_INT_IE_VE_CHG | \
+ NGD_INT_DEV_ERR | NGD_INT_TX_MSG_SENT | \
+ NGD_INT_RX_MSG_RCVD)
+
+/* Slimbus QMI service */
+#define SLIMBUS_QMI_SVC_ID 0x0301
+#define SLIMBUS_QMI_SVC_V1 1
+#define SLIMBUS_QMI_INS_ID 0
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
+#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
+#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
+#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14
+#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
+#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+/* QMI response timeout */
+#define SLIMBUS_QMI_RESP_TOUT 1000
+
+/* User defined commands */
+#define SLIM_USR_MC_GENERIC_ACK 0x25
+#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
+#define SLIM_USR_MC_REPORT_SATELLITE 0x1
+#define SLIM_USR_MC_ADDR_QUERY 0xD
+#define SLIM_USR_MC_ADDR_REPLY 0xE
+#define SLIM_USR_MC_DEFINE_CHAN 0x20
+#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
+#define SLIM_USR_MC_CHAN_CTRL 0x23
+#define SLIM_USR_MC_RECONFIG_NOW 0x24
+#define SLIM_USR_MC_REQ_BW 0x28
+#define SLIM_USR_MC_CONNECT_SRC 0x2C
+#define SLIM_USR_MC_CONNECT_SINK 0x2D
+#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
+#define SLIM_USR_MC_REPEAT_CHANGE_VALUE 0x0
+
+#define QCOM_SLIM_NGD_AUTOSUSPEND MSEC_PER_SEC
+#define SLIM_RX_MSGQ_TIMEOUT_VAL 0x10000
+
+#define SLIM_LA_MGR 0xFF
+#define SLIM_ROOT_FREQ 24576000
+#define LADDR_RETRY 5
+
+/* Per spec, max 40 bytes per received message */
+#define SLIM_MSGQ_BUF_LEN 40
+#define QCOM_SLIM_NGD_DESC_NUM 32
+
+#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
+ ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
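+
+/*
+ * Per the shifts above, the first word packs the message length in bits
+ * [4:0], message type in [7:5], message code from bit 8, destination type
+ * at bit 15 and the destination address from bit 16 upwards.
+ */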
+
+#define INIT_MX_RETRIES 10
+#define DEF_RETRY_MS 10
+#define SAT_MAGIC_LSB 0xD9
+#define SAT_MAGIC_MSB 0xC5
+#define SAT_MSG_VER 0x1
+#define SAT_MSG_PROT 0x1
+#define to_ngd(d) container_of(d, struct qcom_slim_ngd, dev)
+
+struct ngd_reg_offset_data {
+ u32 offset, size;
+};
+
+static const struct ngd_reg_offset_data ngd_v1_5_offset_info = {
+ .offset = 0x1000,
+ .size = 0x1000,
+};
+
+enum qcom_slim_ngd_state {
+ QCOM_SLIM_NGD_CTRL_AWAKE,
+ QCOM_SLIM_NGD_CTRL_IDLE,
+ QCOM_SLIM_NGD_CTRL_ASLEEP,
+ QCOM_SLIM_NGD_CTRL_DOWN,
+};
+
+struct qcom_slim_ngd_qmi {
+ struct qmi_handle qmi;
+ struct sockaddr_qrtr svc_info;
+ struct qmi_handle svc_event_hdl;
+ struct qmi_response_type_v01 resp;
+ struct qmi_handle *handle;
+ struct completion qmi_comp;
+};
+
+struct qcom_slim_ngd_ctrl;
+struct qcom_slim_ngd;
+
+struct qcom_slim_ngd_dma_desc {
+ struct dma_async_tx_descriptor *desc;
+ struct qcom_slim_ngd_ctrl *ctrl;
+ struct completion *comp;
+ dma_cookie_t cookie;
+ dma_addr_t phys;
+ void *base;
+};
+
+struct qcom_slim_ngd {
+ struct platform_device *pdev;
+ void __iomem *base;
+ int id;
+};
+
+struct qcom_slim_ngd_ctrl {
+ struct slim_framer framer;
+ struct slim_controller ctrl;
+ struct qcom_slim_ngd_qmi qmi;
+ struct qcom_slim_ngd *ngd;
+ struct device *dev;
+ void __iomem *base;
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+ struct qcom_slim_ngd_dma_desc rx_desc[QCOM_SLIM_NGD_DESC_NUM];
+ struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM];
+ struct completion reconf;
+ struct work_struct m_work;
+ struct workqueue_struct *mwq;
+ spinlock_t tx_buf_lock;
+ enum qcom_slim_ngd_state state;
+ dma_addr_t rx_phys_base;
+ dma_addr_t tx_phys_base;
+ void *rx_base;
+ void *tx_base;
+ int tx_tail;
+ int tx_head;
+ u32 ver;
+};
+
+enum slimbus_mode_enum_type_v01 {
+	/* To force a 32-bit signed enum. Do not change or use. */
+ SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+ SLIMBUS_MODE_SATELLITE_V01 = 1,
+ SLIMBUS_MODE_MASTER_V01 = 2,
+ SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_pm_enum_type_v01 {
+	/* To force a 32-bit signed enum. Do not change or use. */
+ SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+ SLIMBUS_PM_INACTIVE_V01 = 1,
+ SLIMBUS_PM_ACTIVE_V01 = 2,
+ SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum slimbus_resp_enum_type_v01 {
+ SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+ SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
+ SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
+struct slimbus_select_inst_req_msg_v01 {
+ uint32_t instance;
+ uint8_t mode_valid;
+ enum slimbus_mode_enum_type_v01 mode;
+};
+
+struct slimbus_select_inst_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct slimbus_power_req_msg_v01 {
+ enum slimbus_pm_enum_type_v01 pm_req;
+ uint8_t resp_type_valid;
+ enum slimbus_resp_enum_type_v01 resp_type;
+};
+
+struct slimbus_power_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ instance),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ mode_valid),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(enum slimbus_mode_enum_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
+ mode),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(enum slimbus_pm_enum_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct slimbus_power_req_msg_v01,
+ pm_req),
+ .ei_array = NULL,
+ },
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len = 1,
+		.elem_size = sizeof(uint8_t),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct slimbus_power_req_msg_v01,
+				   resp_type_valid),
+		.ei_array = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len = 1,
+		.elem_size = sizeof(enum slimbus_resp_enum_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x10,
+		.offset = offsetof(struct slimbus_power_req_msg_v01,
+				   resp_type),
+		.ei_array = NULL,
+	},
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x00,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+
+static int qcom_slim_qmi_send_select_inst_req(struct qcom_slim_ngd_ctrl *ctrl,
+ struct slimbus_select_inst_req_msg_v01 *req)
+{
+ struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
+ struct qmi_txn txn;
+ int rc;
+
+ rc = qmi_txn_init(ctrl->qmi.handle, &txn,
+ slimbus_select_inst_resp_msg_v01_ei, &resp);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
+ return rc;
+ }
+
+ rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
+ SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01,
+ SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN,
+ slimbus_select_inst_req_msg_v01_ei, req);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
+ qmi_txn_cancel(&txn);
+ return rc;
+ }
+
+ rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
+ return rc;
+ }
+ /* Check the response */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ dev_err(ctrl->dev, "QMI request failed 0x%x\n",
+ resp.resp.result);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static void qcom_slim_qmi_power_resp_cb(struct qmi_handle *handle,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct slimbus_power_resp_msg_v01 *resp;
+
+ resp = (struct slimbus_power_resp_msg_v01 *)data;
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
+ pr_err("QMI power request failed 0x%x\n",
+ resp->resp.result);
+
+ complete(&txn->completion);
+}
+
+static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl,
+ struct slimbus_power_req_msg_v01 *req)
+{
+ struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
+ struct qmi_txn txn;
+ int rc;
+
+	rc = qmi_txn_init(ctrl->qmi.handle, &txn,
+			  slimbus_power_resp_msg_v01_ei, &resp);
+	if (rc < 0) {
+		dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
+		return rc;
+	}
+
+ rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
+ SLIMBUS_QMI_POWER_REQ_V01,
+ SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
+ slimbus_power_req_msg_v01_ei, req);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
+ qmi_txn_cancel(&txn);
+ return rc;
+ }
+
+ rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
+ return rc;
+ }
+
+ /* Check the response */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ dev_err(ctrl->dev, "QMI request failed 0x%x\n",
+ resp.resp.result);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id = SLIMBUS_QMI_POWER_RESP_V01,
+ .ei = slimbus_power_resp_msg_v01_ei,
+ .decoded_size = sizeof(struct slimbus_power_resp_msg_v01),
+ .fn = qcom_slim_qmi_power_resp_cb,
+ },
+ {}
+};
+
+static int qcom_slim_qmi_init(struct qcom_slim_ngd_ctrl *ctrl,
+ bool apps_is_master)
+{
+ struct slimbus_select_inst_req_msg_v01 req;
+ struct qmi_handle *handle;
+ int rc;
+
+ handle = devm_kzalloc(ctrl->dev, sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ rc = qmi_handle_init(handle, SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
+ NULL, qcom_slim_qmi_msg_handlers);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "QMI client init failed: %d\n", rc);
+ goto qmi_handle_init_failed;
+ }
+
+ rc = kernel_connect(handle->sock,
+ (struct sockaddr *)&ctrl->qmi.svc_info,
+ sizeof(ctrl->qmi.svc_info), 0);
+ if (rc < 0) {
+ dev_err(ctrl->dev, "Remote Service connect failed: %d\n", rc);
+ goto qmi_connect_to_service_failed;
+ }
+
+	/* Instance is 0-based */
+	req.instance = (ctrl->ngd->id >> 1);
+	req.mode_valid = 1;
+
+	/*
+	 * Mode indicates the role of the ADSP: if the application
+	 * processor is the bus master, the ADSP runs as a satellite,
+	 * and vice versa.
+	 */
+	if (apps_is_master)
+		req.mode = SLIMBUS_MODE_SATELLITE_V01;
+	else
+		req.mode = SLIMBUS_MODE_MASTER_V01;
+
+ ctrl->qmi.handle = handle;
+
+ rc = qcom_slim_qmi_send_select_inst_req(ctrl, &req);
+ if (rc) {
+ dev_err(ctrl->dev, "failed to select h/w instance\n");
+ goto qmi_select_instance_failed;
+ }
+
+ return 0;
+
+qmi_select_instance_failed:
+ ctrl->qmi.handle = NULL;
+qmi_connect_to_service_failed:
+ qmi_handle_release(handle);
+qmi_handle_init_failed:
+ devm_kfree(ctrl->dev, handle);
+ return rc;
+}
+
+static void qcom_slim_qmi_exit(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ if (!ctrl->qmi.handle)
+ return;
+
+ qmi_handle_release(ctrl->qmi.handle);
+ devm_kfree(ctrl->dev, ctrl->qmi.handle);
+ ctrl->qmi.handle = NULL;
+}
+
+static int qcom_slim_qmi_power_request(struct qcom_slim_ngd_ctrl *ctrl,
+ bool active)
+{
+ struct slimbus_power_req_msg_v01 req;
+
+ if (active)
+ req.pm_req = SLIMBUS_PM_ACTIVE_V01;
+ else
+ req.pm_req = SLIMBUS_PM_INACTIVE_V01;
+
+ req.resp_type_valid = 0;
+
+ return qcom_slim_qmi_send_power_request(ctrl, &req);
+}
+
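+/*
+ * Reserve the next free slot in the TX message ring. tx_tail is the
+ * producer index and tx_head the consumer index (advanced from the DMA
+ * completion callback); one slot is always left unused so that a full
+ * ring can be told apart from an empty one.
+ */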
+static u32 *qcom_slim_ngd_tx_msg_get(struct qcom_slim_ngd_ctrl *ctrl, int len,
+ struct completion *comp)
+{
+ struct qcom_slim_ngd_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+
+ if ((ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM == ctrl->tx_head) {
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+ return NULL;
+ }
+ desc = &ctrl->txdesc[ctrl->tx_tail];
+ desc->base = ctrl->tx_base + ctrl->tx_tail * SLIM_MSGQ_BUF_LEN;
+ desc->comp = comp;
+ ctrl->tx_tail = (ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM;
+
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+ return desc->base;
+}
+
+static void qcom_slim_ngd_tx_msg_dma_cb(void *args)
+{
+ struct qcom_slim_ngd_dma_desc *desc = args;
+ struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+
+ if (desc->comp) {
+ complete(desc->comp);
+ desc->comp = NULL;
+ }
+
+ ctrl->tx_head = (ctrl->tx_head + 1) % QCOM_SLIM_NGD_DESC_NUM;
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+}
+
+static int qcom_slim_ngd_tx_msg_post(struct qcom_slim_ngd_ctrl *ctrl,
+ void *buf, int len)
+{
+ struct qcom_slim_ngd_dma_desc *desc;
+ unsigned long flags;
+ int index, offset;
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+ offset = buf - ctrl->tx_base;
+ index = offset/SLIM_MSGQ_BUF_LEN;
+
+ desc = &ctrl->txdesc[index];
+ desc->phys = ctrl->tx_phys_base + offset;
+ desc->base = ctrl->tx_base + offset;
+ desc->ctrl = ctrl;
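+	/* Round the length up to a whole number of 32-bit words */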
+ len = (len + 3) & 0xfc;
+
+ desc->desc = dmaengine_prep_slave_single(ctrl->dma_tx_channel,
+ desc->phys, len,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!desc->desc) {
+ dev_err(ctrl->dev, "unable to prepare channel\n");
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+ return -EINVAL;
+ }
+
+ desc->desc->callback = qcom_slim_ngd_tx_msg_dma_cb;
+ desc->desc->callback_param = desc;
+ desc->desc->cookie = dmaengine_submit(desc->desc);
+ dma_async_issue_pending(ctrl->dma_tx_channel);
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+ return 0;
+}
+
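+/*
+ * Dispatch a received message: master capability announcements are
+ * handed off to the master worker, while replies and generic ACKs are
+ * passed to the core so the waiter on the matching TID completes.
+ */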
+static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
+{
+ u8 mc, mt, len;
+
+ mt = SLIM_HEADER_GET_MT(buf[0]);
+ len = SLIM_HEADER_GET_RL(buf[0]);
+ mc = SLIM_HEADER_GET_MC(buf[1]);
+
+ if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER)
+ queue_work(ctrl->mwq, &ctrl->m_work);
+
+ if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
+ mc == SLIM_MSG_MC_REPLY_VALUE || (mc == SLIM_USR_MC_ADDR_REPLY &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER) ||
+ (mc == SLIM_USR_MC_GENERIC_ACK &&
+ mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
+ slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ }
+}
+
+static void qcom_slim_ngd_rx_msgq_cb(void *args)
+{
+ struct qcom_slim_ngd_dma_desc *desc = args;
+ struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
+
+ qcom_slim_ngd_rx(ctrl, (u8 *)desc->base);
+ /* Add descriptor back to the queue */
+ desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
+ desc->phys, SLIM_MSGQ_BUF_LEN,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc->desc) {
+ dev_err(ctrl->dev, "Unable to prepare rx channel\n");
+ return;
+ }
+
+ desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
+ desc->desc->callback_param = desc;
+ desc->desc->cookie = dmaengine_submit(desc->desc);
+ dma_async_issue_pending(ctrl->dma_rx_channel);
+}
+
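+/*
+ * Prime the RX message queue: map every RX descriptor to its slot in
+ * the coherent buffer and submit it to the DMA engine. Completions are
+ * handled (and descriptors re-armed) in qcom_slim_ngd_rx_msgq_cb().
+ */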
+static int qcom_slim_ngd_post_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct qcom_slim_ngd_dma_desc *desc;
+ int i;
+
+ for (i = 0; i < QCOM_SLIM_NGD_DESC_NUM; i++) {
+ desc = &ctrl->rx_desc[i];
+ desc->phys = ctrl->rx_phys_base + i * SLIM_MSGQ_BUF_LEN;
+ desc->ctrl = ctrl;
+ desc->base = ctrl->rx_base + i * SLIM_MSGQ_BUF_LEN;
+ desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
+ desc->phys, SLIM_MSGQ_BUF_LEN,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc->desc) {
+ dev_err(ctrl->dev, "Unable to prepare rx channel\n");
+ return -EINVAL;
+ }
+
+ desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
+ desc->desc->callback_param = desc;
+ desc->desc->cookie = dmaengine_submit(desc->desc);
+ }
+ dma_async_issue_pending(ctrl->dma_rx_channel);
+
+ return 0;
+}
+
+static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ int ret, size;
+
+ ctrl->dma_rx_channel = dma_request_slave_channel(dev, "rx");
+ if (!ctrl->dma_rx_channel) {
+		dev_err(dev, "Failed to request RX dma channel\n");
+ return -EINVAL;
+ }
+
+ size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
+ ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base,
+ GFP_KERNEL);
+ if (!ctrl->rx_base) {
+ dev_err(dev, "dma_alloc_coherent failed\n");
+ ret = -ENOMEM;
+ goto rel_rx;
+ }
+
+ ret = qcom_slim_ngd_post_rx_msgq(ctrl);
+ if (ret) {
+ dev_err(dev, "post_rx_msgq() failed 0x%x\n", ret);
+ goto rx_post_err;
+ }
+
+ return 0;
+
+rx_post_err:
+ dma_free_coherent(dev, size, ctrl->rx_base, ctrl->rx_phys_base);
+rel_rx:
+ dma_release_channel(ctrl->dma_rx_channel);
+ return ret;
+}
+
+static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct device *dev = ctrl->dev;
+ unsigned long flags;
+ int ret = 0;
+ int size;
+
+ ctrl->dma_tx_channel = dma_request_slave_channel(dev, "tx");
+ if (!ctrl->dma_tx_channel) {
+		dev_err(dev, "Failed to request TX dma channel\n");
+ return -EINVAL;
+ }
+
+ size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
+ ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base,
+ GFP_KERNEL);
+ if (!ctrl->tx_base) {
+ dev_err(dev, "dma_alloc_coherent failed\n");
+ ret = -EINVAL;
+ goto rel_tx;
+ }
+
+ spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
+ ctrl->tx_tail = 0;
+ ctrl->tx_head = 0;
+ spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
+
+ return 0;
+rel_tx:
+ dma_release_channel(ctrl->dma_tx_channel);
+ return ret;
+}
+
+static int qcom_slim_ngd_init_dma(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ int ret = 0;
+
+ ret = qcom_slim_ngd_init_rx_msgq(ctrl);
+ if (ret) {
+ dev_err(ctrl->dev, "rx dma init failed\n");
+ return ret;
+ }
+
+ ret = qcom_slim_ngd_init_tx_msgq(ctrl);
+ if (ret)
+ dev_err(ctrl->dev, "tx dma init failed\n");
+
+ return ret;
+}
+
+static irqreturn_t qcom_slim_ngd_interrupt(int irq, void *d)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = d;
+ void __iomem *base = ctrl->ngd->base;
+ u32 stat = readl(base + NGD_INT_STAT);
+
+ if ((stat & NGD_INT_MSG_BUF_CONTE) ||
+ (stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
+ (stat & NGD_INT_TX_NACKED_2)) {
+ dev_err(ctrl->dev, "Error Interrupt received 0x%x\n", stat);
+ }
+
+ writel(stat, base + NGD_INT_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
+ struct slim_msg_txn *txn)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
+ DECLARE_COMPLETION_ONSTACK(tx_sent);
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret, timeout, i;
+ u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ u8 rbuf[SLIM_MSGQ_BUF_LEN];
+ u32 *pbuf;
+ u8 *puc;
+ u8 la = txn->la;
+ bool usr_msg = false;
+
+ if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
+ return -EPROTONOSUPPORT;
+
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
+ txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
+ return 0;
+
+ if (txn->dt == SLIM_MSG_DEST_ENUMADDR)
+ return -EPROTONOSUPPORT;
+
+ if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN ||
+ txn->rl > SLIM_MSGQ_BUF_LEN) {
+		dev_err(ctrl->dev, "msg exceeds HW limit\n");
+ return -EINVAL;
+ }
+
+ pbuf = qcom_slim_ngd_tx_msg_get(ctrl, txn->rl, &tx_sent);
+ if (!pbuf) {
+ dev_err(ctrl->dev, "Message buffer unavailable\n");
+ return -ENOMEM;
+ }
+
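+	/*
+	 * Port connect/disconnect requests are sent to the remote bus
+	 * master as destination-referred user messages; the original
+	 * target logical address is carried in the payload instead.
+	 */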
+ if (txn->mt == SLIM_MSG_MT_CORE &&
+ (txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
+ txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
+ txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
+ txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ switch (txn->mc) {
+ case SLIM_MSG_MC_CONNECT_SOURCE:
+ txn->mc = SLIM_USR_MC_CONNECT_SRC;
+ break;
+ case SLIM_MSG_MC_CONNECT_SINK:
+ txn->mc = SLIM_USR_MC_CONNECT_SINK;
+ break;
+ case SLIM_MSG_MC_DISCONNECT_PORT:
+ txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ usr_msg = true;
+ i = 0;
+ wbuf[i++] = txn->la;
+ la = SLIM_LA_MGR;
+ wbuf[i++] = txn->msg->wbuf[0];
+ if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
+ wbuf[i++] = txn->msg->wbuf[1];
+
+ txn->comp = &done;
+ ret = slim_alloc_txn_tid(sctrl, txn);
+ if (ret) {
+ dev_err(ctrl->dev, "Unable to allocate TID\n");
+ return ret;
+ }
+
+ wbuf[i++] = txn->tid;
+
+ txn->msg->num_bytes = i;
+ txn->msg->wbuf = wbuf;
+ txn->msg->rbuf = rbuf;
+ txn->rl = txn->msg->num_bytes + 4;
+ }
+
+ /* HW expects length field to be excluded */
+ txn->rl--;
+ puc = (u8 *)pbuf;
+ *pbuf = 0;
+ if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
+ la);
+ puc += 3;
+ } else {
+ *pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
+ la);
+ puc += 2;
+ }
+
+ if (slim_tid_txn(txn->mt, txn->mc))
+ *(puc++) = txn->tid;
+
+ if (slim_ec_txn(txn->mt, txn->mc)) {
+ *(puc++) = (txn->ec & 0xFF);
+ *(puc++) = (txn->ec >> 8) & 0xFF;
+ }
+
+ if (txn->msg && txn->msg->wbuf)
+ memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
+
+ ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl);
+ if (ret)
+ return ret;
+
+ timeout = wait_for_completion_timeout(&tx_sent, HZ);
+ if (!timeout) {
+ dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+ txn->mt);
+ return -ETIMEDOUT;
+ }
+
+ if (usr_msg) {
+ timeout = wait_for_completion_timeout(&done, HZ);
+ if (!timeout) {
+ dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
+ txn->mc, txn->mt);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
+ struct slim_msg_txn *txn)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret, timeout;
+
+ pm_runtime_get_sync(ctrl->dev);
+
+ txn->comp = &done;
+
+ ret = qcom_slim_ngd_xfer_msg(ctrl, txn);
+ if (ret)
+ return ret;
+
+ timeout = wait_for_completion_timeout(&done, HZ);
+ if (!timeout) {
+ dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
+ txn->mt);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
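+/*
+ * The remote master expects channel setup as user messages: one
+ * DEF_ACT_CHAN message carrying the rate, protocol and all channel IDs,
+ * followed by a RECONFIG_NOW message to commit the configuration.
+ */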
+static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt)
+{
+ struct slim_device *sdev = rt->dev;
+ struct slim_controller *ctrl = sdev->ctrl;
+ struct slim_val_inf msg = {0};
+ u8 wbuf[SLIM_MSGQ_BUF_LEN];
+ u8 rbuf[SLIM_MSGQ_BUF_LEN];
+ struct slim_msg_txn txn = {0,};
+ int i, ret;
+
+ txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.la = SLIM_LA_MGR;
+ txn.ec = 0;
+ txn.msg = &msg;
+ txn.msg->num_bytes = 0;
+ txn.msg->wbuf = wbuf;
+ txn.msg->rbuf = rbuf;
+
+ for (i = 0; i < rt->num_ports; i++) {
+ struct slim_port *port = &rt->ports[i];
+
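+		/* On the first port, build the shared channel header */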
+ if (txn.msg->num_bytes == 0) {
+ int seg_interval = SLIM_SLOTS_PER_SUPERFRAME/rt->ratem;
+ int exp;
+
+ wbuf[txn.msg->num_bytes++] = sdev->laddr;
+ wbuf[txn.msg->num_bytes] = rt->bps >> 2 |
+ (port->ch.aux_fmt << 6);
+
+ /* Data channel segment interval not multiple of 3 */
+ exp = seg_interval % 3;
+ if (exp)
+ wbuf[txn.msg->num_bytes] |= BIT(5);
+
+ txn.msg->num_bytes++;
+ wbuf[txn.msg->num_bytes++] = exp << 4 | rt->prot;
+
+ if (rt->prot == SLIM_PROTO_ISO)
+ wbuf[txn.msg->num_bytes++] =
+ port->ch.prrate |
+ SLIM_CHANNEL_CONTENT_FL;
+ else
+ wbuf[txn.msg->num_bytes++] = port->ch.prrate;
+
+ ret = slim_alloc_txn_tid(ctrl, &txn);
+ if (ret) {
+ dev_err(&sdev->dev, "Fail to allocate TID\n");
+ return -ENXIO;
+ }
+ wbuf[txn.msg->num_bytes++] = txn.tid;
+ }
+ wbuf[txn.msg->num_bytes++] = port->ch.id;
+ }
+
+ txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
+ txn.rl = txn.msg->num_bytes + 4;
+ ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+ if (ret) {
+ slim_free_txn_tid(ctrl, &txn);
+ dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
+ txn.mt);
+ return ret;
+ }
+
+ txn.mc = SLIM_USR_MC_RECONFIG_NOW;
+ txn.msg->num_bytes = 2;
+ wbuf[1] = sdev->laddr;
+ txn.rl = txn.msg->num_bytes + 4;
+
+ ret = slim_alloc_txn_tid(ctrl, &txn);
+ if (ret) {
+ dev_err(ctrl->dev, "Fail to allocate TID\n");
+ return ret;
+ }
+
+ wbuf[0] = txn.tid;
+ ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+ if (ret) {
+ slim_free_txn_tid(ctrl, &txn);
+ dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
+ txn.mt);
+ }
+
+ return ret;
+}
+
+static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
+ struct slim_eaddr *ea, u8 *laddr)
+{
+ struct slim_val_inf msg = {0};
+ struct slim_msg_txn txn;
+ u8 wbuf[10] = {0};
+ u8 rbuf[10] = {0};
+ int ret;
+
+ txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.la = SLIM_LA_MGR;
+ txn.ec = 0;
+
+ txn.mc = SLIM_USR_MC_ADDR_QUERY;
+ txn.rl = 11;
+ txn.msg = &msg;
+ txn.msg->num_bytes = 7;
+ txn.msg->wbuf = wbuf;
+ txn.msg->rbuf = rbuf;
+
+ ret = slim_alloc_txn_tid(ctrl, &txn);
+ if (ret < 0)
+ return ret;
+
+ wbuf[0] = (u8)txn.tid;
+ memcpy(&wbuf[1], ea, sizeof(*ea));
+
+ ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
+ if (ret) {
+ slim_free_txn_tid(ctrl, &txn);
+ return ret;
+ }
+
+ *laddr = rbuf[6];
+
+ return ret;
+}
+
+static int qcom_slim_ngd_exit_dma(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ if (ctrl->dma_rx_channel) {
+ dmaengine_terminate_sync(ctrl->dma_rx_channel);
+ dma_release_channel(ctrl->dma_rx_channel);
+ }
+
+ if (ctrl->dma_tx_channel) {
+ dmaengine_terminate_sync(ctrl->dma_tx_channel);
+ dma_release_channel(ctrl->dma_tx_channel);
+ }
+
+ ctrl->dma_tx_channel = ctrl->dma_rx_channel = NULL;
+
+ return 0;
+}
+
+static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	u32 cfg = readl_relaxed(ctrl->ngd->base + NGD_CFG);
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+ qcom_slim_ngd_init_dma(ctrl);
+
+ /* By default enable message queues */
+ cfg |= NGD_CFG_RX_MSGQ_EN;
+ cfg |= NGD_CFG_TX_MSGQ_EN;
+
+	/* Enable NGD if it's not already enabled */
+ if (!(cfg & NGD_CFG_ENABLE))
+ cfg |= NGD_CFG_ENABLE;
+
+	writel_relaxed(cfg, ctrl->ngd->base + NGD_CFG);
+}
+
+static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ enum qcom_slim_ngd_state cur_state = ctrl->state;
+ struct qcom_slim_ngd *ngd = ctrl->ngd;
+ u32 laddr, rx_msgq;
+ int timeout, ret = 0;
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
+ timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
+ if (!timeout)
+ return -EREMOTEIO;
+ }
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP ||
+ ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
+ ret = qcom_slim_qmi_power_request(ctrl, true);
+ if (ret) {
+ dev_err(ctrl->dev, "SLIM QMI power request failed:%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ctrl->ver = readl_relaxed(ctrl->base);
+ /* Version info in 16 MSbits */
+ ctrl->ver >>= 16;
+
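+	/* If the NGD already reports a logical address, it is already up */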
+ laddr = readl_relaxed(ngd->base + NGD_STATUS);
+ if (laddr & NGD_LADDR) {
+		/*
+		 * External MDM restart case, where the ADSP itself was the
+		 * active framer; for example, the modem restarted while
+		 * playback was active.
+		 */
+ if (cur_state == QCOM_SLIM_NGD_CTRL_AWAKE) {
+ dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
+ return 0;
+ }
+ return 0;
+ }
+
+ writel_relaxed(DEF_NGD_INT_MASK, ngd->base + NGD_INT_EN);
+ rx_msgq = readl_relaxed(ngd->base + NGD_RX_MSGQ_CFG);
+
+ writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
+ ngd->base + NGD_RX_MSGQ_CFG);
+ qcom_slim_ngd_setup(ctrl);
+
+ timeout = wait_for_completion_timeout(&ctrl->reconf, HZ);
+ if (!timeout) {
+ dev_err(ctrl->dev, "capability exchange timed-out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void qcom_slim_ngd_notify_slaves(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct slim_device *sbdev;
+ struct device_node *node;
+
+ for_each_child_of_node(ctrl->ngd->pdev->dev.of_node, node) {
+ sbdev = of_slim_get_device(&ctrl->ctrl, node);
+ if (!sbdev)
+ continue;
+
+ if (slim_get_logical_addr(sbdev))
+ dev_err(ctrl->dev, "Failed to get logical address\n");
+ }
+}
+
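+/*
+ * Reply to the master's capability announcement by reporting this
+ * controller as a satellite; once the exchange succeeds, waiters on
+ * ctrl->reconf are released and any enumerated devices are notified.
+ */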
+static void qcom_slim_ngd_master_worker(struct work_struct *work)
+{
+ struct qcom_slim_ngd_ctrl *ctrl;
+ struct slim_msg_txn txn;
+ struct slim_val_inf msg = {0};
+ int retries = 0;
+ u8 wbuf[8];
+ int ret = 0;
+
+ ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work);
+ txn.dt = SLIM_MSG_DEST_LOGICALADDR;
+ txn.ec = 0;
+ txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
+ txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
+ txn.la = SLIM_LA_MGR;
+ wbuf[0] = SAT_MAGIC_LSB;
+ wbuf[1] = SAT_MAGIC_MSB;
+ wbuf[2] = SAT_MSG_VER;
+ wbuf[3] = SAT_MSG_PROT;
+ txn.msg = &msg;
+ txn.msg->wbuf = wbuf;
+ txn.msg->num_bytes = 4;
+ txn.rl = 8;
+
+ dev_info(ctrl->dev, "SLIM SAT: Rcvd master capability\n");
+
+capability_retry:
+ ret = qcom_slim_ngd_xfer_msg(&ctrl->ctrl, &txn);
+ if (!ret) {
+ if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
+ complete(&ctrl->reconf);
+ else
+ dev_err(ctrl->dev, "unexpected state:%d\n",
+ ctrl->state);
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+ qcom_slim_ngd_notify_slaves(ctrl);
+
+ } else if (ret == -EIO) {
+ dev_err(ctrl->dev, "capability message NACKed, retrying\n");
+ if (retries < INIT_MX_RETRIES) {
+ msleep(DEF_RETRY_MS);
+ retries++;
+ goto capability_retry;
+ }
+ } else {
+ dev_err(ctrl->dev, "SLIM: capability TX failed:%d\n", ret);
+ }
+}
+
+static int qcom_slim_ngd_runtime_resume(struct device *dev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
+ ret = qcom_slim_ngd_power_up(ctrl);
+ if (ret) {
+		/* Did SSR cause this power-up failure? */
+ if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN)
+ ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
+ else
+ dev_err(ctrl->dev, "HW wakeup attempt during SSR\n");
+ } else {
+ ctrl->state = QCOM_SLIM_NGD_CTRL_AWAKE;
+ }
+
+ return 0;
+}
+
+static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
+{
+ if (enable) {
+ int ret = qcom_slim_qmi_init(ctrl, false);
+
+ if (ret) {
+ dev_err(ctrl->dev, "qmi init fail, ret:%d, state:%d\n",
+ ret, ctrl->state);
+ return ret;
+ }
+ /* controller state should be in sync with framework state */
+ complete(&ctrl->qmi.qmi_comp);
+ if (!pm_runtime_enabled(ctrl->dev) ||
+ !pm_runtime_suspended(ctrl->dev))
+ qcom_slim_ngd_runtime_resume(ctrl->dev);
+ else
+ pm_runtime_resume(ctrl->dev);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put(ctrl->dev);
+ } else {
+ qcom_slim_qmi_exit(ctrl);
+ }
+
+ return 0;
+}
+
+static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl,
+ struct qmi_service *service)
+{
+ struct qcom_slim_ngd_qmi *qmi =
+ container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+ struct qcom_slim_ngd_ctrl *ctrl =
+ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
+
+ qmi->svc_info.sq_family = AF_QIPCRTR;
+ qmi->svc_info.sq_node = service->node;
+ qmi->svc_info.sq_port = service->port;
+
+ qcom_slim_ngd_enable(ctrl, true);
+
+ return 0;
+}
+
+static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
+ struct qmi_service *service)
+{
+ struct qcom_slim_ngd_qmi *qmi =
+ container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+
+ qmi->svc_info.sq_node = 0;
+ qmi->svc_info.sq_port = 0;
+}
+
+static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
+ .new_server = qcom_slim_ngd_qmi_new_server,
+ .del_server = qcom_slim_ngd_qmi_del_server,
+};
+
+static int qcom_slim_ngd_qmi_svc_event_init(struct qcom_slim_ngd_ctrl *ctrl)
+{
+ struct qcom_slim_ngd_qmi *qmi = &ctrl->qmi;
+ int ret;
+
+ ret = qmi_handle_init(&qmi->svc_event_hdl, 0,
+ &qcom_slim_ngd_qmi_svc_event_ops, NULL);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "qmi_handle_init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = qmi_add_lookup(&qmi->svc_event_hdl, SLIMBUS_QMI_SVC_ID,
+ SLIMBUS_QMI_SVC_V1, SLIMBUS_QMI_INS_ID);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "qmi_add_lookup failed: %d\n", ret);
+ qmi_handle_release(&qmi->svc_event_hdl);
+ }
+ return ret;
+}
+
+static void qcom_slim_ngd_qmi_svc_event_deinit(struct qcom_slim_ngd_qmi *qmi)
+{
+ qmi_handle_release(&qmi->svc_event_hdl);
+}
+
+static struct platform_driver qcom_slim_ngd_driver;
+#define QCOM_SLIM_NGD_DRV_NAME "qcom,slim-ngd"
+
+static const struct of_device_id qcom_slim_ngd_dt_match[] = {
+ {
+ .compatible = "qcom,slim-ngd-v1.5.0",
+ .data = &ngd_v1_5_offset_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match);
+
+static int of_qcom_slim_ngd_register(struct device *parent,
+ struct qcom_slim_ngd_ctrl *ctrl)
+{
+ const struct ngd_reg_offset_data *data;
+ struct qcom_slim_ngd *ngd;
+ struct device_node *node;
+ u32 id;
+
+ data = of_match_node(qcom_slim_ngd_dt_match, parent->of_node)->data;
+
+ for_each_available_child_of_node(parent->of_node, node) {
+ if (of_property_read_u32(node, "reg", &id))
+ continue;
+
+ ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
+ if (!ngd)
+ return -ENOMEM;
+
+		ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
+		if (!ngd->pdev) {
+			kfree(ngd);
+			return -ENOMEM;
+		}
+
+		ngd->id = id;
+		ngd->pdev->dev.parent = parent;
+		ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
+		ngd->pdev->dev.of_node = node;
+		ngd->base = ctrl->base + ngd->id * data->offset +
+					(ngd->id - 1) * data->size;
+		ctrl->ngd = ngd;
+		platform_set_drvdata(ngd->pdev, ctrl);
+
+		platform_device_add(ngd->pdev);
+ platform_driver_register(&qcom_slim_ngd_driver);
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int qcom_slim_ngd_probe(struct platform_device *pdev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ctrl->ctrl.dev = dev;
+ ret = slim_register_controller(&ctrl->ctrl);
+ if (ret) {
+ dev_err(dev, "error adding slim controller\n");
+ return ret;
+ }
+
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_noresume(dev);
+ ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
+ goto err;
+ }
+
+ INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
+ ctrl->mwq = create_singlethread_workqueue("ngd_master");
+ if (!ctrl->mwq) {
+ dev_err(&pdev->dev, "Failed to start master worker\n");
+ ret = -ENOMEM;
+ goto wq_err;
+ }
+
+	return 0;
+wq_err:
+	qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
+err:
+	slim_unregister_controller(&ctrl->ctrl);
+
+	return ret;
+}
+
+static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_slim_ngd_ctrl *ctrl;
+ struct resource *res;
+ int ret;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, ctrl);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no slimbus IRQ resource\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(dev, res->start, qcom_slim_ngd_interrupt,
+ IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "request IRQ failed\n");
+ return ret;
+ }
+
+ ctrl->dev = dev;
+ ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
+ ctrl->framer.superfreq =
+ ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
+
+ ctrl->ctrl.a_framer = &ctrl->framer;
+ ctrl->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
+ ctrl->ctrl.get_laddr = qcom_slim_ngd_get_laddr;
+ ctrl->ctrl.enable_stream = qcom_slim_ngd_enable_stream;
+ ctrl->ctrl.xfer_msg = qcom_slim_ngd_xfer_msg;
+ ctrl->ctrl.wakeup = NULL;
+ ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
+
+ spin_lock_init(&ctrl->tx_buf_lock);
+ init_completion(&ctrl->reconf);
+ init_completion(&ctrl->qmi.qmi_comp);
+
+ return of_qcom_slim_ngd_register(dev, ctrl);
+}
+
+static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev)
+{
+ platform_driver_unregister(&qcom_slim_ngd_driver);
+
+ return 0;
+}
+
+static int qcom_slim_ngd_remove(struct platform_device *pdev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ slim_unregister_controller(&ctrl->ctrl);
+ qcom_slim_ngd_exit_dma(ctrl);
+ qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
+ if (ctrl->mwq)
+ destroy_workqueue(ctrl->mwq);
+
+ kfree(ctrl->ngd);
+ ctrl->ngd = NULL;
+ return 0;
+}
+
+static int qcom_slim_ngd_runtime_idle(struct device *dev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->state == QCOM_SLIM_NGD_CTRL_AWAKE)
+ ctrl->state = QCOM_SLIM_NGD_CTRL_IDLE;
+ pm_request_autosuspend(dev);
+ return -EAGAIN;
+}
+
+#ifdef CONFIG_PM
+static int qcom_slim_ngd_runtime_suspend(struct device *dev)
+{
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = qcom_slim_qmi_power_request(ctrl, false);
+ if (ret && ret != -EBUSY)
+ dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
+ if (!ret || ret == -ETIMEDOUT)
+ ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(
+ qcom_slim_ngd_runtime_suspend,
+ qcom_slim_ngd_runtime_resume,
+ qcom_slim_ngd_runtime_idle
+ )
+};
+
+static struct platform_driver qcom_slim_ngd_ctrl_driver = {
+ .probe = qcom_slim_ngd_ctrl_probe,
+ .remove = qcom_slim_ngd_ctrl_remove,
+ .driver = {
+ .name = "qcom,slim-ngd-ctrl",
+ .of_match_table = qcom_slim_ngd_dt_match,
+ },
+};
+
+static struct platform_driver qcom_slim_ngd_driver = {
+ .probe = qcom_slim_ngd_probe,
+ .remove = qcom_slim_ngd_remove,
+ .driver = {
+ .name = QCOM_SLIM_NGD_DRV_NAME,
+ .pm = &qcom_slim_ngd_dev_pm_ops,
+ },
+};
+
+module_platform_driver(qcom_slim_ngd_ctrl_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm SLIMBus NGD controller");
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index 79f8e05d92dd..4399d1873e2d 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -17,6 +17,8 @@
/* SLIMbus message types. Related to interpretation of message code. */
#define SLIM_MSG_MT_CORE 0x0
+#define SLIM_MSG_MT_DEST_REFERRED_USER 0x2
+#define SLIM_MSG_MT_SRC_REFERRED_USER 0x6
/*
* SLIM Broadcast header format
@@ -43,11 +45,28 @@
#define SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS 0x2
#define SLIM_MSG_MC_REPORT_ABSENT 0xF
+/* Data channel management messages */
+#define SLIM_MSG_MC_CONNECT_SOURCE 0x10
+#define SLIM_MSG_MC_CONNECT_SINK 0x11
+#define SLIM_MSG_MC_DISCONNECT_PORT 0x14
+#define SLIM_MSG_MC_CHANGE_CONTENT 0x18
+
/* Clock pause Reconfiguration messages */
#define SLIM_MSG_MC_BEGIN_RECONFIGURATION 0x40
#define SLIM_MSG_MC_NEXT_PAUSE_CLOCK 0x4A
+#define SLIM_MSG_MC_NEXT_DEFINE_CHANNEL 0x50
+#define SLIM_MSG_MC_NEXT_DEFINE_CONTENT 0x51
+#define SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL 0x54
+#define SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL 0x55
+#define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58
#define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F
+/*
+ * Clock pause flag to indicate that the reconfig message
+ * corresponds to clock pause sequence
+ */
+#define SLIM_MSG_CLK_PAUSE_SEQ_FLG (1U << 8)
+
/* Clock pause values per SLIMbus spec */
#define SLIM_CLK_FAST 0
#define SLIM_CLK_CONST_PHASE 1
@@ -61,7 +80,15 @@
/* Standard values per SLIMbus spec needed by controllers and devices */
#define SLIM_MAX_CLK_GEAR 10
#define SLIM_MIN_CLK_GEAR 1
+#define SLIM_SLOT_LEN_BITS 4
+
+/* Indicate that the frequency of the flow and the bus frequency are locked */
+#define SLIM_CHANNEL_CONTENT_FL BIT(7)
+/* Standard values per SLIMbus spec needed by controllers and devices */
+#define SLIM_CL_PER_SUPERFRAME 6144
+#define SLIM_SLOTS_PER_SUPERFRAME (SLIM_CL_PER_SUPERFRAME >> 2)
+#define SLIM_SL_PER_SUPERFRAME (SLIM_CL_PER_SUPERFRAME >> 2)
/* Manager's logical address is set to 0xFF per spec */
#define SLIM_LA_MANAGER 0xFF
@@ -160,6 +187,169 @@ struct slim_sched {
};
/**
+ * enum slim_port_direction: SLIMbus port direction
+ *
+ * @SLIM_PORT_SINK: SLIMbus port is a sink
+ * @SLIM_PORT_SOURCE: SLIMbus port is a source
+ */
+enum slim_port_direction {
+ SLIM_PORT_SINK = 0,
+ SLIM_PORT_SOURCE,
+};
+/**
+ * enum slim_port_state: SLIMbus Port/Endpoint state machine
+ * according to SLIMbus Spec 2.0
+ * @SLIM_PORT_DISCONNECTED: SLIMbus port is disconnected.
+ * Entered from the unconfigured/configured state after a
+ * DISCONNECT_PORT or REMOVE_CHANNEL core command.
+ * @SLIM_PORT_UNCONFIGURED: SLIMbus port is in the unconfigured state.
+ * Entered from the disconnected state after a CONNECT_SOURCE/SINK core
+ * command.
+ * @SLIM_PORT_CONFIGURED: SLIMbus port is in the configured state.
+ * Entered from the unconfigured state after DEFINE_CHANNEL, DEFINE_CONTENT
+ * and ACTIVATE_CHANNEL core commands. Ready for data transmission.
+ */
+enum slim_port_state {
+ SLIM_PORT_DISCONNECTED = 0,
+ SLIM_PORT_UNCONFIGURED,
+ SLIM_PORT_CONFIGURED,
+};
+
+/**
+ * enum slim_channel_state: SLIMbus channel state machine used by core.
+ * @SLIM_CH_STATE_DISCONNECTED: SLIMbus channel is disconnected
+ * @SLIM_CH_STATE_ALLOCATED: SLIMbus channel is allocated
+ * @SLIM_CH_STATE_ASSOCIATED: SLIMbus channel is associated with port
+ * @SLIM_CH_STATE_DEFINED: SLIMbus channel parameters are defined
+ * @SLIM_CH_STATE_CONTENT_DEFINED: SLIMbus channel content is defined
+ * @SLIM_CH_STATE_ACTIVE: SLIMbus channel is active and ready for data
+ * @SLIM_CH_STATE_REMOVED: SLIMbus channel is inactive and removed
+ */
+enum slim_channel_state {
+ SLIM_CH_STATE_DISCONNECTED = 0,
+ SLIM_CH_STATE_ALLOCATED,
+ SLIM_CH_STATE_ASSOCIATED,
+ SLIM_CH_STATE_DEFINED,
+ SLIM_CH_STATE_CONTENT_DEFINED,
+ SLIM_CH_STATE_ACTIVE,
+ SLIM_CH_STATE_REMOVED,
+};
+
+/**
+ * enum slim_ch_data_fmt: SLIMbus channel data Type identifiers according to
+ * Table 60 of SLIMbus Spec 1.01.01
+ * @SLIM_CH_DATA_FMT_NOT_DEFINED: Undefined
+ * @SLIM_CH_DATA_FMT_LPCM_AUDIO: LPCM audio
+ * @SLIM_CH_DATA_FMT_IEC61937_COMP_AUDIO: IEC61937 Compressed audio
+ * @SLIM_CH_DATA_FMT_PACKED_PDM_AUDIO: Packed PDM audio
+ */
+enum slim_ch_data_fmt {
+ SLIM_CH_DATA_FMT_NOT_DEFINED = 0,
+ SLIM_CH_DATA_FMT_LPCM_AUDIO = 1,
+ SLIM_CH_DATA_FMT_IEC61937_COMP_AUDIO = 2,
+ SLIM_CH_DATA_FMT_PACKED_PDM_AUDIO = 3,
+};
+
+/**
+ * enum slim_ch_aux_fmt: SLIMbus channel Aux Field format IDs according to
+ * Table 63 of SLIMbus Spec 2.0
+ * @SLIM_CH_AUX_FMT_NOT_APPLICABLE: Undefined
+ * @SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958: ZCUV for tunneling IEC60958
+ * @SLIM_CH_AUX_FMT_USER_DEFINED: User defined
+ */
+enum slim_ch_aux_bit_fmt {
+ SLIM_CH_AUX_FMT_NOT_APPLICABLE = 0,
+ SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958 = 1,
+ SLIM_CH_AUX_FMT_USER_DEFINED = 0xF,
+};
+
+/**
+ * struct slim_channel - SLIMbus channel, used for state machine
+ *
+ * @id: ID of channel
+ * @prrate: Presence rate of channel from Table 66 of SLIMbus 2.0 Specs
+ * @seg_dist: segment distribution code from Table 20 of SLIMbus 2.0 Specs
+ * @data_fmt: Data format of channel.
+ * @aux_fmt: Aux format for this channel.
+ * @state: channel state machine
+ */
+struct slim_channel {
+ int id;
+ int prrate;
+ int seg_dist;
+ enum slim_ch_data_fmt data_fmt;
+ enum slim_ch_aux_bit_fmt aux_fmt;
+ enum slim_channel_state state;
+};
+
+/**
+ * struct slim_port - SLIMbus port
+ *
+ * @id: Port id
+ * @direction: Port direction, Source or Sink.
+ * @state: state machine of port.
+ * @ch: channel associated with this port.
+ */
+struct slim_port {
+ int id;
+ enum slim_port_direction direction;
+ enum slim_port_state state;
+ struct slim_channel ch;
+};
+
+/**
+ * enum slim_transport_protocol: SLIMbus Transport protocol list from
+ * Table 47 of SLIMbus 2.0 specs.
+ * @SLIM_PROTO_ISO: Isochronous Protocol; no flow control, as the data rate
+ * matches the channel rate, with flow information embedded in the data.
+ * @SLIM_PROTO_PUSH: Pushed Protocol; includes flow control, used to carry
+ * data whose rate is equal to, or lower than, the channel rate.
+ * @SLIM_PROTO_PULL: Pulled Protocol; similar usage to the pushed protocol,
+ * but pull is unicast.
+ * @SLIM_PROTO_LOCKED: Locked Protocol
+ * @SLIM_PROTO_ASYNC_SMPLX: Asynchronous Protocol-Simplex
+ * @SLIM_PROTO_ASYNC_HALF_DUP: Asynchronous Protocol-Half-duplex
+ * @SLIM_PROTO_EXT_SMPLX: Extended Asynchronous Protocol-Simplex
+ * @SLIM_PROTO_EXT_HALF_DUP: Extended Asynchronous Protocol-Half-duplex
+ */
+enum slim_transport_protocol {
+ SLIM_PROTO_ISO = 0,
+ SLIM_PROTO_PUSH,
+ SLIM_PROTO_PULL,
+ SLIM_PROTO_LOCKED,
+ SLIM_PROTO_ASYNC_SMPLX,
+ SLIM_PROTO_ASYNC_HALF_DUP,
+ SLIM_PROTO_EXT_SMPLX,
+ SLIM_PROTO_EXT_HALF_DUP,
+};
+
+/**
+ * struct slim_stream_runtime - SLIMbus stream runtime instance
+ *
+ * @name: Name of the stream
+ * @dev: SLIM Device instance associated with this stream
+ * @direction: direction of stream
+ * @prot: Transport protocol used in this stream
+ * @rate: Data rate of samples
+ * @bps: bits per sample
+ * @ratem: rate multiplier, i.e. the data rate divided by the superframe rate
+ * @num_ports: number of ports
+ * @ports: pointer to instance of ports
+ * @node: list head for stream associated with slim device.
+ */
+struct slim_stream_runtime {
+ const char *name;
+ struct slim_device *dev;
+ int direction;
+ enum slim_transport_protocol prot;
+ unsigned int rate;
+ unsigned int bps;
+ unsigned int ratem;
+ int num_ports;
+ struct slim_port *ports;
+ struct list_head node;
+};
+
+/**
* struct slim_controller - Controls every instance of SLIMbus
* (similar to 'master' on SPI)
* @dev: Device interface to this driver
@@ -188,6 +378,10 @@ struct slim_sched {
* @wakeup: This function pointer implements controller-specific procedure
* to wake it up from clock-pause. Framework will call this to bring
* the controller out of clock pause.
+ * @enable_stream: This function pointer implements controller-specific procedure
+ * to enable a stream.
+ * @disable_stream: This function pointer implements controller-specific procedure
+ * to disable stream.
*
* 'Manager device' is responsible for device management, bandwidth
* allocation, channel setup, and port associations per channel.
@@ -229,6 +423,8 @@ struct slim_controller {
struct slim_eaddr *ea, u8 laddr);
int (*get_laddr)(struct slim_controller *ctrl,
struct slim_eaddr *ea, u8 *laddr);
+ int (*enable_stream)(struct slim_stream_runtime *rt);
+ int (*disable_stream)(struct slim_stream_runtime *rt);
int (*wakeup)(struct slim_controller *ctrl);
};
@@ -240,6 +436,8 @@ int slim_unregister_controller(struct slim_controller *ctrl);
void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 l);
int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn);
int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart);
+int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn);
+void slim_free_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn);
static inline bool slim_tid_txn(u8 mt, u8 mc)
{
diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c
new file mode 100644
index 000000000000..2fa05324ed07
--- /dev/null
+++ b/drivers/slimbus/stream.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/slimbus.h>
+#include <uapi/sound/asound.h>
+#include "slimbus.h"
+
+/**
+ * struct segdist_code - Segment Distributions code from
+ * Table 20 of SLIMbus Specs Version 2.0
+ *
+ * @ratem: Channel Rate Multiplier (Segments per Superframe)
+ * @seg_interval: Number of slots between the first Slot of Segment
+ * and the first slot of the next consecutive Segment.
+ * @segdist_code: Segment Distribution Code SD[11:0]
+ * @seg_offset_mask: Segment offset mask in SD[11:0]
+ * @segdist_codes: List of all possible Segment Distribution codes.
+ */
+static const struct segdist_code {
+ int ratem;
+ int seg_interval;
+ int segdist_code;
+ u32 seg_offset_mask;
+
+} segdist_codes[] = {
+ {1, 1536, 0x200, 0xdff},
+ {2, 768, 0x100, 0xcff},
+ {4, 384, 0x080, 0xc7f},
+ {8, 192, 0x040, 0xc3f},
+ {16, 96, 0x020, 0xc1f},
+ {32, 48, 0x010, 0xc0f},
+ {64, 24, 0x008, 0xc07},
+ {128, 12, 0x004, 0xc03},
+ {256, 6, 0x002, 0xc01},
+ {512, 3, 0x001, 0xc00},
+ {3, 512, 0xe00, 0x1ff},
+ {6, 256, 0xd00, 0x0ff},
+ {12, 128, 0xc80, 0x07f},
+ {24, 64, 0xc40, 0x03f},
+ {48, 32, 0xc20, 0x01f},
+ {96, 16, 0xc10, 0x00f},
+ {192, 8, 0xc08, 0x007},
+	{384, 4, 0xc04, 0x003},
+ {768, 2, 0xc02, 0x001},
+};
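+
+/*
+ * Note: in every entry above, ratem * seg_interval equals the 1536
+ * slots of a superframe.
+ */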
+
+/*
+ * Presence Rate table for all Natural Frequencies
+ * The Presence rate of a constant-bitrate stream is the mean flow rate of the
+ * stream expressed in occupied Segments of that Data Channel per second.
+ * Table 66 from SLIMbus 2.0 Specs
+ *
+ * Index of the table corresponds to Presence rate code for the respective rate
+ * in the table.
+ */
+static const int slim_presence_rate_table[] = {
+ 0, /* Not Indicated */
+ 12000,
+ 24000,
+ 48000,
+ 96000,
+ 192000,
+ 384000,
+ 768000,
+ 0, /* Reserved */
+ 110250,
+ 220500,
+ 441000,
+ 882000,
+ 176400,
+ 352800,
+ 705600,
+ 4000,
+ 8000,
+ 16000,
+ 32000,
+ 64000,
+ 128000,
+ 256000,
+ 512000,
+};
+
+/*
+ * slim_stream_allocate() - Allocate a new SLIMbus Stream
+ * @dev: Slim device to be associated with
+ * @name: name of the stream
+ *
+ * This is the very first call for SLIMbus streaming; it allocates a new
+ * SLIMbus stream and returns a valid stream runtime pointer for the client
+ * to use in subsequent stream APIs. The state of the stream is set to
+ * ALLOCATED.
+ *
+ * Return: valid pointer on success and error code on failure.
+ * From ASoC DPCM framework, this state is linked to startup() operation.
+ */
+struct slim_stream_runtime *slim_stream_allocate(struct slim_device *dev,
+ const char *name)
+{
+ struct slim_stream_runtime *rt;
+
+ rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return ERR_PTR(-ENOMEM);
+
+ rt->name = kasprintf(GFP_KERNEL, "slim-%s", name);
+ if (!rt->name) {
+ kfree(rt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rt->dev = dev;
+ spin_lock(&dev->stream_list_lock);
+ list_add_tail(&rt->node, &dev->stream_list);
+ spin_unlock(&dev->stream_list_lock);
+
+ return rt;
+}
+EXPORT_SYMBOL_GPL(slim_stream_allocate);
+
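+/*
+ * Connect a device port to its data channel with a CONNECT_SOURCE or
+ * CONNECT_SINK core message; the port then moves to the UNCONFIGURED
+ * state.
+ */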
+static int slim_connect_port_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[2];
+ struct slim_val_inf msg = {0, 2, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_CONNECT_SOURCE;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 6, stream->dev->laddr, &msg);
+
+ if (port->direction == SLIM_PORT_SINK)
+ txn.mc = SLIM_MSG_MC_CONNECT_SINK;
+
+ wbuf[0] = port->id;
+ wbuf[1] = port->ch.id;
+ port->ch.state = SLIM_CH_STATE_ASSOCIATED;
+ port->state = SLIM_PORT_UNCONFIGURED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_disconnect_port(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[1];
+ struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_DISCONNECT_PORT;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+
+ wbuf[0] = port->id;
+ port->ch.state = SLIM_CH_STATE_DISCONNECTED;
+ port->state = SLIM_PORT_DISCONNECTED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_deactivate_remove_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[1];
+ struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+ int ret;
+
+ wbuf[0] = port->ch.id;
+ ret = slim_do_transfer(sdev->ctrl, &txn);
+ if (ret)
+ return ret;
+
+ txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
+ port->ch.state = SLIM_CH_STATE_REMOVED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_get_prate_code(int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(slim_presence_rate_table); i++) {
+ if (rate == slim_presence_rate_table[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * slim_stream_prepare() - Prepare a SLIMbus Stream
+ *
+ * @rt: instance of slim stream runtime to configure
+ * @cfg: new configuration for the stream
+ *
+ * This API will configure the SLIMbus stream with the config parameters
+ * from cfg.
+ *
+ * Return: zero on success and error code on failure. From the ASoC DPCM
+ * framework, this state is linked to the hw_params() operation.
+ */
+int slim_stream_prepare(struct slim_stream_runtime *rt,
+ struct slim_stream_config *cfg)
+{
+ struct slim_controller *ctrl = rt->dev->ctrl;
+ struct slim_port *port;
+ int num_ports, i, port_id;
+
+ if (rt->ports) {
+ dev_err(&rt->dev->dev, "Stream already Prepared\n");
+ return -EINVAL;
+ }
+
+ num_ports = hweight32(cfg->port_mask);
+ rt->ports = kcalloc(num_ports, sizeof(*port), GFP_KERNEL);
+ if (!rt->ports)
+ return -ENOMEM;
+
+ rt->num_ports = num_ports;
+ rt->rate = cfg->rate;
+ rt->bps = cfg->bps;
+ rt->direction = cfg->direction;
+
+ if (cfg->rate % ctrl->a_framer->superfreq) {
+		/*
+		 * Data rate is not an exact multiple of the superframe
+		 * rate, so use the PUSH/PULL protocol.
+		 */
+ if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ rt->prot = SLIM_PROTO_PUSH;
+ else
+ rt->prot = SLIM_PROTO_PULL;
+ } else {
+ rt->prot = SLIM_PROTO_ISO;
+ }
+
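+	/* ratem: segments per superframe, i.e. data rate / superframe rate */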
+ rt->ratem = cfg->rate/ctrl->a_framer->superfreq;
+
+ i = 0;
+ for_each_set_bit(port_id, &cfg->port_mask, SLIM_DEVICE_MAX_PORTS) {
+ port = &rt->ports[i];
+ port->state = SLIM_PORT_DISCONNECTED;
+ port->id = port_id;
+ port->ch.prrate = slim_get_prate_code(cfg->rate);
+ port->ch.id = cfg->chs[i];
+ port->ch.data_fmt = SLIM_CH_DATA_FMT_NOT_DEFINED;
+ port->ch.aux_fmt = SLIM_CH_AUX_FMT_NOT_APPLICABLE;
+ port->ch.state = SLIM_CH_STATE_ALLOCATED;
+
+ if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
+ port->direction = SLIM_PORT_SINK;
+ else
+ port->direction = SLIM_PORT_SOURCE;
+
+ slim_connect_port_channel(rt, port);
+ i++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_prepare);
+
+static int slim_define_channel_content(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[4];
+ struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
+
+ wbuf[0] = port->ch.id;
+ wbuf[1] = port->ch.prrate;
+
+ /* Frequency Locked for ISO Protocol */
+ if (stream->prot != SLIM_PROTO_ISO)
+ wbuf[1] |= SLIM_CHANNEL_CONTENT_FL;
+
+ wbuf[2] = port->ch.data_fmt | (port->ch.aux_fmt << 4);
+ wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
+ port->ch.state = SLIM_CH_STATE_CONTENT_DEFINED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_get_segdist_code(int ratem)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(segdist_codes); i++) {
+ if (segdist_codes[i].ratem == ratem)
+ return segdist_codes[i].segdist_code;
+ }
+
+ return -EINVAL;
+}
+
+static int slim_define_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[4];
+ struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
+
+ port->ch.seg_dist = slim_get_segdist_code(stream->ratem);
+
+ wbuf[0] = port->ch.id;
+ wbuf[1] = port->ch.seg_dist & 0xFF;
+ wbuf[2] = (stream->prot << 4) | ((port->ch.seg_dist & 0xF00) >> 8);
+ if (stream->prot == SLIM_PROTO_ISO)
+ wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
+ else
+ wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS + 1;
+
+ port->ch.state = SLIM_CH_STATE_DEFINED;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+static int slim_activate_channel(struct slim_stream_runtime *stream,
+ struct slim_port *port)
+{
+ struct slim_device *sdev = stream->dev;
+ u8 wbuf[1];
+ struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
+ u8 mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
+ DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
+
+	wbuf[0] = port->ch.id;
+ port->ch.state = SLIM_CH_STATE_ACTIVE;
+
+ return slim_do_transfer(sdev->ctrl, &txn);
+}
+
+/*
+ * slim_stream_enable() - Enable a prepared SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to enable
+ *
+ * This API will enable all the ports and channels associated with the
+ * SLIMbus stream.
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to trigger() start operation.
+ */
+int slim_stream_enable(struct slim_stream_runtime *stream)
+{
+ DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+ 3, SLIM_LA_MANAGER, NULL);
+ struct slim_controller *ctrl = stream->dev->ctrl;
+ int ret, i;
+
+ if (ctrl->enable_stream) {
+ ret = ctrl->enable_stream(stream);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < stream->num_ports; i++)
+ stream->ports[i].ch.state = SLIM_CH_STATE_ACTIVE;
+
+ return ret;
+ }
+
+ ret = slim_do_transfer(ctrl, &txn);
+ if (ret)
+ return ret;
+
+ /* define channels first before activating them */
+ for (i = 0; i < stream->num_ports; i++) {
+ struct slim_port *port = &stream->ports[i];
+
+ slim_define_channel(stream, port);
+ slim_define_channel_content(stream, port);
+ }
+
+ for (i = 0; i < stream->num_ports; i++) {
+ struct slim_port *port = &stream->ports[i];
+
+ slim_activate_channel(stream, port);
+ port->state = SLIM_PORT_CONFIGURED;
+ }
+ txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+
+ return slim_do_transfer(ctrl, &txn);
+}
+EXPORT_SYMBOL_GPL(slim_stream_enable);
+
+/*
+ * slim_stream_disable() - Disable a SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to disable
+ *
+ * This API will disable all the ports and channels associated with the
+ * SLIMbus stream.
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to trigger() pause operation.
+ */
+int slim_stream_disable(struct slim_stream_runtime *stream)
+{
+ DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
+ 3, SLIM_LA_MANAGER, NULL);
+ struct slim_controller *ctrl = stream->dev->ctrl;
+ int ret, i;
+
+ if (ctrl->disable_stream)
+ ctrl->disable_stream(stream);
+
+ ret = slim_do_transfer(ctrl, &txn);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < stream->num_ports; i++)
+ slim_deactivate_remove_channel(stream, &stream->ports[i]);
+
+ txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
+
+ return slim_do_transfer(ctrl, &txn);
+}
+EXPORT_SYMBOL_GPL(slim_stream_disable);
+
+/*
+ * slim_stream_unprepare() - Un-prepare a SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to unprepare
+ *
+ * This API will release all the ports and channels associated with the
+ * SLIMbus stream.
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to trigger() stop operation.
+ */
+int slim_stream_unprepare(struct slim_stream_runtime *stream)
+{
+ int i;
+
+ for (i = 0; i < stream->num_ports; i++)
+ slim_disconnect_port(stream, &stream->ports[i]);
+
+ kfree(stream->ports);
+ stream->ports = NULL;
+ stream->num_ports = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_unprepare);
+
+/*
+ * slim_stream_free() - Free a SLIMbus Stream
+ *
+ * @stream: instance of slim stream runtime to free
+ *
+ * This API will free all the memory associated with the slim stream
+ * runtime; the caller must not dereference the stream after this call.
+ *
+ * Return: zero on success and error code on failure. From ASoC DPCM framework,
+ * this state is linked to shutdown() operation.
+ */
+int slim_stream_free(struct slim_stream_runtime *stream)
+{
+ struct slim_device *sdev = stream->dev;
+
+ spin_lock(&sdev->stream_list_lock);
+ list_del(&stream->node);
+ spin_unlock(&sdev->stream_list_lock);
+
+ kfree(stream->name);
+ kfree(stream);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(slim_stream_free);
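/*
 * Illustrative sketch, not part of this patch: the intended lifecycle
 * of these calls from a SLIMbus codec driver, mapped to the ASoC DPCM
 * states named in the kernel-doc above. slim_stream_allocate() and
 * slim_stream_prepare() come from earlier in this series and are
 * assumed to fail with ERR_PTR()/-errno; the config contents are
 * elided.
 */
static int my_codec_playback(struct slim_device *sdev)
{
	struct slim_stream_config cfg = { /* rate/bps/channels for the DAI */ };
	struct slim_stream_runtime *rt;
	int ret;

	rt = slim_stream_allocate(sdev, "my-playback");	/* startup() */
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ret = slim_stream_prepare(rt, &cfg);		/* hw_params() */
	if (ret)
		goto free_stream;

	ret = slim_stream_enable(rt);			/* trigger() start */
	if (ret)
		goto unprepare;

	/* ... audio flows ... */

	slim_stream_disable(rt);			/* trigger() pause */
unprepare:
	slim_stream_unprepare(rt);			/* trigger() stop */
free_stream:
	slim_stream_free(rt);				/* shutdown() */
	return ret;
}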
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index a923ebdeb73c..092381e2accf 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>
@@ -132,6 +133,8 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
if (!uuids)
return -ENOMEM;
+ pm_runtime_get_sync(&tb->dev);
+
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
@@ -153,7 +156,10 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
}
out:
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
kfree(uuids);
+
return ret;
}
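/*
 * Illustrative sketch, not part of this patch: the runtime PM
 * bracketing idiom applied to boot_acl_show() above and
 * boot_acl_store() below. A sysfs handler that touches a possibly
 * autosuspended device resumes it first, then releases it through the
 * autosuspend variants so the idle timer is rearmed. do_hw_access()
 * is a hypothetical helper.
 */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	ssize_t ret;

	pm_runtime_get_sync(dev);		/* resume if suspended */
	ret = do_hw_access(dev, buf);
	pm_runtime_mark_last_busy(dev);		/* restart autosuspend timer */
	pm_runtime_put_autosuspend(dev);

	return ret;
}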
@@ -208,9 +214,11 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
goto err_free_acl;
}
+ pm_runtime_get_sync(&tb->dev);
+
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
- goto err_free_acl;
+ goto err_rpm_put;
}
ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
if (!ret) {
@@ -219,6 +227,9 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
}
mutex_unlock(&tb->lock);
+err_rpm_put:
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
kfree(acl);
err_free_str:
@@ -430,6 +441,13 @@ int tb_domain_add(struct tb *tb)
/* This starts event processing */
mutex_unlock(&tb->lock);
+ pm_runtime_no_callbacks(&tb->dev);
+ pm_runtime_set_active(&tb->dev);
+ pm_runtime_enable(&tb->dev);
+ pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_use_autosuspend(&tb->dev);
+
return 0;
err_domain_del:
@@ -509,26 +527,35 @@ int tb_domain_resume_noirq(struct tb *tb)
int tb_domain_suspend(struct tb *tb)
{
- int ret;
+ return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
+}
- mutex_lock(&tb->lock);
- if (tb->cm_ops->suspend) {
- ret = tb->cm_ops->suspend(tb);
- if (ret) {
- mutex_unlock(&tb->lock);
+void tb_domain_complete(struct tb *tb)
+{
+ if (tb->cm_ops->complete)
+ tb->cm_ops->complete(tb);
+}
+
+int tb_domain_runtime_suspend(struct tb *tb)
+{
+ if (tb->cm_ops->runtime_suspend) {
+ int ret = tb->cm_ops->runtime_suspend(tb);
+ if (ret)
return ret;
- }
}
- mutex_unlock(&tb->lock);
+ tb_ctl_stop(tb->ctl);
return 0;
}
-void tb_domain_complete(struct tb *tb)
+int tb_domain_runtime_resume(struct tb *tb)
{
- mutex_lock(&tb->lock);
- if (tb->cm_ops->complete)
- tb->cm_ops->complete(tb);
- mutex_unlock(&tb->lock);
+ tb_ctl_start(tb->ctl);
+ if (tb->cm_ops->runtime_resume) {
+ int ret = tb->cm_ops->runtime_resume(tb);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
/**
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 500911f16498..e1e264a9a4c7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -57,9 +58,11 @@
* (only set when @upstream_port is not %NULL)
* @safe_mode: ICM is in safe mode
* @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
+ * @rpm: Does the controller support runtime PM (RTD3)
* @is_supported: Checks if we can support ICM on this controller
* @get_mode: Read and return the ICM firmware mode (optional)
* @get_route: Find a route string for given switch
+ * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
* @driver_ready: Send driver ready message to ICM
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
@@ -73,12 +76,14 @@ struct icm {
size_t max_boot_acl;
int vnd_cap;
bool safe_mode;
+ bool rpm;
bool (*is_supported)(struct tb *tb);
int (*get_mode)(struct tb *tb);
int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+ void (*save_devices)(struct tb *tb);
int (*driver_ready)(struct tb *tb,
enum tb_security_level *security_level,
- size_t *nboot_acl);
+ size_t *nboot_acl, bool *rpm);
void (*device_connected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*device_disconnected)(struct tb *tb,
@@ -95,6 +100,47 @@ struct icm_notification {
struct tb *tb;
};
+struct ep_name_entry {
+ u8 len;
+ u8 type;
+ u8 data[0];
+};
+
+#define EP_NAME_INTEL_VSS 0x10
+
+/* Intel Vendor specific structure */
+struct intel_vss {
+ u16 vendor;
+ u16 model;
+ u8 mc;
+ u8 flags;
+ u16 pci_devid;
+ u32 nvm_version;
+};
+
+#define INTEL_VSS_FLAGS_RTD3 BIT(0)
+
+static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
+{
+ const void *end = ep_name + size;
+
+ while (ep_name < end) {
+ const struct ep_name_entry *ep = ep_name;
+
+ if (!ep->len)
+ break;
+ if (ep_name + ep->len > end)
+ break;
+
+ if (ep->type == EP_NAME_INTEL_VSS)
+ return (const struct intel_vss *)ep->data;
+
+ ep_name += ep->len;
+ }
+
+ return NULL;
+}
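/*
 * Illustrative sketch, not part of this patch: decoding RTD3 support
 * from the raw ep_name blob of an ICM device-connected packet, which
 * is exactly how add_switch() below sets sw->rpm.
 */
static bool example_supports_rtd3(const u8 *ep_name, size_t size)
{
	const struct intel_vss *vss = parse_intel_vss(ep_name, size);

	return vss && (vss->flags & INTEL_VSS_FLAGS_RTD3);
}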
+
static inline struct tb *icm_to_tb(struct icm *icm)
{
return ((void *)icm - sizeof(struct tb));
@@ -258,9 +304,14 @@ err_free:
return ret;
}
+static void icm_fr_save_devices(struct tb *tb)
+{
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+}
+
static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm_fr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@@ -410,15 +461,19 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
}
static void add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, u8 connection_id, u8 connection_key,
+ const uuid_t *uuid, const u8 *ep_name,
+ size_t ep_name_size, u8 connection_id, u8 connection_key,
u8 link, u8 depth, enum tb_security_level security_level,
bool authorized, bool boot)
{
+ const struct intel_vss *vss;
struct tb_switch *sw;
+ pm_runtime_get_sync(&parent_sw->dev);
+
sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
if (!sw)
- return;
+ goto out;
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
sw->connection_id = connection_id;
@@ -429,6 +484,10 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
sw->security_level = security_level;
sw->boot = boot;
+ vss = parse_intel_vss(ep_name, ep_name_size);
+ if (vss)
+ sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
/* Link the two switches now */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
@@ -436,8 +495,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
if (tb_switch_add(sw)) {
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
tb_switch_put(sw);
- return;
}
+
+out:
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -477,9 +539,11 @@ static void add_xdomain(struct tb_switch *sw, u64 route,
{
struct tb_xdomain *xd;
+ pm_runtime_get_sync(&sw->dev);
+
xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
if (!xd)
- return;
+ goto out;
xd->link = link;
xd->depth = depth;
@@ -487,6 +551,10 @@ static void add_xdomain(struct tb_switch *sw, u64 route,
tb_port_at(route, sw)->xdomain = xd;
tb_xdomain_add(xd);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
}
static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
@@ -534,20 +602,13 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- ret = icm->get_route(tb, link, depth, &route);
- if (ret) {
- tb_err(tb, "failed to find route string for switch at %u.%u\n",
- link, depth);
- return;
- }
-
sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
if (sw) {
u8 phy_port, sw_phy_port;
parent_sw = tb_to_switch(sw->dev.parent);
- sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
- phy_port = phy_port_from_route(route, depth);
+ sw_phy_port = tb_phy_port_from_link(sw->link);
+ phy_port = tb_phy_port_from_link(link);
/*
* On resume ICM will send us connected events for the
@@ -559,6 +620,22 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
*/
if (sw->depth == depth && sw_phy_port == phy_port &&
!!sw->authorized == authorized) {
+ /*
+ * It was enumerated through another link, so update
+ * the route string accordingly.
+ */
+ if (sw->link != link) {
+ ret = icm->get_route(tb, link, depth, &route);
+ if (ret) {
+ tb_err(tb, "failed to update route string for switch at %u.%u\n",
+ link, depth);
+ tb_switch_put(sw);
+ return;
+ }
+ } else {
+ route = tb_route(sw);
+ }
+
update_switch(parent_sw, sw, route, pkg->connection_id,
pkg->connection_key, link, depth, boot);
tb_switch_put(sw);
@@ -607,7 +684,16 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
+ ret = icm->get_route(tb, link, depth, &route);
+ if (ret) {
+ tb_err(tb, "failed to find route string for switch at %u.%u\n",
+ link, depth);
+ tb_switch_put(parent_sw);
+ return;
+ }
+
+ add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+ sizeof(pkg->ep_name), pkg->connection_id,
pkg->connection_key, link, depth, security_level,
authorized, boot);
@@ -650,7 +736,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
struct tb_xdomain *xd;
struct tb_switch *sw;
u8 link, depth;
- bool approved;
u64 route;
/*
@@ -664,7 +749,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
- approved = pkg->link_info & ICM_LINK_INFO_APPROVED;
if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
@@ -757,7 +841,7 @@ icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm_tr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@@ -776,6 +860,9 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
if (nboot_acl)
*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
ICM_TR_INFO_BOOT_ACL_SHIFT;
+ if (rpm)
+ *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);
+
return 0;
}
@@ -1005,7 +1092,8 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
+ add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
+ sizeof(pkg->ep_name), pkg->connection_id,
0, 0, 0, security_level, authorized, boot);
tb_switch_put(parent_sw);
@@ -1184,7 +1272,7 @@ static int icm_ar_get_mode(struct tb *tb)
static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm_ar_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
@@ -1203,6 +1291,9 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
ICM_AR_INFO_BOOT_ACL_SHIFT;
+ if (rpm)
+ *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);
+
return 0;
}
@@ -1356,13 +1447,13 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
- size_t *nboot_acl)
+ size_t *nboot_acl, bool *rpm)
{
struct icm *icm = tb_priv(tb);
unsigned int retries = 50;
int ret;
- ret = icm->driver_ready(tb, security_level, nboot_acl);
+ ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
if (ret) {
tb_err(tb, "failed to send driver ready to ICM\n");
return ret;
@@ -1632,7 +1723,8 @@ static int icm_driver_ready(struct tb *tb)
return 0;
}
- ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl);
+ ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
+ &icm->rpm);
if (ret)
return ret;
@@ -1648,13 +1740,12 @@ static int icm_driver_ready(struct tb *tb)
static int icm_suspend(struct tb *tb)
{
- int ret;
+ struct icm *icm = tb_priv(tb);
- ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
- if (ret)
- tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
- ret, __func__);
+ if (icm->save_devices)
+ icm->save_devices(tb);
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
return 0;
}
@@ -1739,7 +1830,7 @@ static void icm_complete(struct tb *tb)
* Now all existing children should be resumed, start events
* from ICM to get updated status.
*/
- __icm_driver_ready(tb, NULL, NULL);
+ __icm_driver_ready(tb, NULL, NULL, NULL);
/*
* We do not get notifications of devices that have been
@@ -1749,6 +1840,22 @@ static void icm_complete(struct tb *tb)
queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}
+static int icm_runtime_suspend(struct tb *tb)
+{
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+ return 0;
+}
+
+static int icm_runtime_resume(struct tb *tb)
+{
+ /*
+ * We can reuse the same resume functionality as with system
+ * suspend.
+ */
+ icm_complete(tb);
+ return 0;
+}
+
static int icm_start(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
@@ -1767,6 +1874,7 @@ static int icm_start(struct tb *tb)
* prevent root switch NVM upgrade on Macs for now.
*/
tb->root_switch->no_nvm_upgrade = x86_apple_machine;
+ tb->root_switch->rpm = icm->rpm;
ret = tb_switch_add(tb->root_switch);
if (ret) {
@@ -1815,6 +1923,8 @@ static const struct tb_cm_ops icm_ar_ops = {
.stop = icm_stop,
.suspend = icm_suspend,
.complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
.handle_event = icm_handle_event,
.get_boot_acl = icm_ar_get_boot_acl,
.set_boot_acl = icm_ar_set_boot_acl,
@@ -1833,6 +1943,8 @@ static const struct tb_cm_ops icm_tr_ops = {
.stop = icm_stop,
.suspend = icm_suspend,
.complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
.handle_event = icm_handle_event,
.get_boot_acl = icm_ar_get_boot_acl,
.set_boot_acl = icm_ar_set_boot_acl,
@@ -1862,6 +1974,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
icm->is_supported = icm_fr_is_supported;
icm->get_route = icm_fr_get_route;
+ icm->save_devices = icm_fr_save_devices;
icm->driver_ready = icm_fr_driver_ready;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
@@ -1879,6 +1992,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->is_supported = icm_ar_is_supported;
icm->get_mode = icm_ar_get_mode;
icm->get_route = icm_ar_get_route;
+ icm->save_devices = icm_fr_save_devices;
icm->driver_ready = icm_ar_driver_ready;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index f5a33e88e676..88cff05a1808 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -900,7 +900,32 @@ static void nhi_complete(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
- tb_domain_complete(tb);
+ /*
+ * If we were runtime suspended when system suspend started,
+ * schedule runtime resume now. It should bring the domain back
+ * to a functional state.
+ */
+ if (pm_runtime_suspended(&pdev->dev))
+ pm_runtime_resume(&pdev->dev);
+ else
+ tb_domain_complete(tb);
+}
+
+static int nhi_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_runtime_suspend(tb);
+}
+
+static int nhi_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ nhi_enable_int_throttling(tb->nhi);
+ return tb_domain_runtime_resume(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
@@ -1015,6 +1040,14 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&nhi->lock);
+ res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (res)
+ res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (res) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ return res;
+ }
+
pci_set_master(pdev);
tb = icm_probe(nhi);
@@ -1040,6 +1073,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, tb);
+ pm_runtime_allow(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
}
@@ -1048,6 +1086,10 @@ static void nhi_remove(struct pci_dev *pdev)
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
+
tb_domain_remove(tb);
nhi_shutdown(nhi);
}
@@ -1070,6 +1112,8 @@ static const struct dev_pm_ops nhi_pm_ops = {
.freeze = nhi_suspend,
.poweroff = nhi_suspend,
.complete = nhi_complete,
+ .runtime_suspend = nhi_runtime_suspend,
+ .runtime_resume = nhi_runtime_resume,
};
static struct pci_device_id nhi_ids[] = {
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 25758671ddf4..7442bc4c6433 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
+#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -236,8 +237,14 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
struct tb_switch *sw = priv;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+ ret = dma_port_flash_read(sw->dma_port, offset, val, bytes);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
- return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+ return ret;
}
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
@@ -722,6 +729,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
* the new tunnel too early.
*/
pci_lock_rescan_remove();
+ pm_runtime_get_sync(&sw->dev);
switch (val) {
/* Approve switch */
@@ -742,6 +750,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
break;
}
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
pci_unlock_rescan_remove();
if (!ret) {
@@ -888,9 +898,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
nvm_clear_auth_status(sw);
if (val) {
+ if (!sw->nvm->buf) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ pm_runtime_get_sync(&sw->dev);
ret = nvm_validate_and_write(sw);
- if (ret)
+ if (ret) {
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
goto exit_unlock;
+ }
sw->nvm->authenticating = true;
@@ -898,6 +917,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
ret = nvm_authenticate_host(sw);
else
ret = nvm_authenticate_device(sw);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
}
exit_unlock:
@@ -1023,9 +1044,29 @@ static void tb_switch_release(struct device *dev)
kfree(sw);
}
+/*
+ * Currently we only need to provide the callbacks. Everything else is handled
+ * in the connection manager.
+ */
+static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops tb_switch_pm_ops = {
+ SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
+ NULL)
+};
+
struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
+ .pm = &tb_switch_pm_ops,
};
static int tb_switch_get_generation(struct tb_switch *sw)
@@ -1365,10 +1406,21 @@ int tb_switch_add(struct tb_switch *sw)
return ret;
ret = tb_switch_nvm_add(sw);
- if (ret)
+ if (ret) {
device_del(&sw->dev);
+ return ret;
+ }
- return ret;
+ pm_runtime_set_active(&sw->dev);
+ if (sw->rpm) {
+ pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&sw->dev);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_enable(&sw->dev);
+ pm_request_autosuspend(&sw->dev);
+ }
+
+ return 0;
}
/**
@@ -1383,6 +1435,11 @@ void tb_switch_remove(struct tb_switch *sw)
{
int i;
+ if (sw->rpm) {
+ pm_runtime_get_sync(&sw->dev);
+ pm_runtime_disable(&sw->dev);
+ }
+
/* port 0 is the switch itself and never has a remote */
for (i = 1; i <= sw->config.max_port_number; i++) {
if (tb_is_upstream_port(&sw->ports[i]))
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 9d9f0ca16bfb..5067d69d0501 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -67,6 +67,7 @@ struct tb_switch_nvm {
* @no_nvm_upgrade: Prevent NVM upgrade of this switch
* @safe_mode: The switch is in safe-mode
* @boot: Whether the switch was already authorized on boot or not
+ * @rpm: The switch supports runtime PM
* @authorized: Whether the switch is authorized by user or policy
* @work: Work used to automatically authorize a switch
* @security_level: Switch supported security level
@@ -101,6 +102,7 @@ struct tb_switch {
bool no_nvm_upgrade;
bool safe_mode;
bool boot;
+ bool rpm;
unsigned int authorized;
struct work_struct work;
enum tb_security_level security_level;
@@ -199,6 +201,8 @@ struct tb_path {
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
* @complete: Connection manager specific complete
+ * @runtime_suspend: Connection manager specific runtime_suspend
+ * @runtime_resume: Connection manager specific runtime_resume
* @handle_event: Handle thunderbolt event
* @get_boot_acl: Get boot ACL list
* @set_boot_acl: Set boot ACL list
@@ -217,6 +221,8 @@ struct tb_cm_ops {
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
void (*complete)(struct tb *tb);
+ int (*runtime_suspend)(struct tb *tb);
+ int (*runtime_resume)(struct tb *tb);
void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
const void *buf, size_t size);
int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
@@ -235,6 +241,8 @@ static inline void *tb_priv(struct tb *tb)
return (void *)tb->privdata;
}
+#define TB_AUTOSUSPEND_DELAY 15000 /* ms */
+
/* helper functions & macros */
/**
@@ -364,6 +372,8 @@ int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
void tb_domain_complete(struct tb *tb);
+int tb_domain_runtime_suspend(struct tb *tb);
+int tb_domain_runtime_resume(struct tb *tb);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index bc13f8d6b804..2487e162c885 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -286,6 +286,8 @@ struct icm_ar_pkg_driver_ready_response {
u16 info;
};
+#define ICM_AR_FLAGS_RTD3 BIT(6)
+
#define ICM_AR_INFO_SLEVEL_MASK GENMASK(3, 0)
#define ICM_AR_INFO_BOOT_ACL_SHIFT 7
#define ICM_AR_INFO_BOOT_ACL_MASK GENMASK(11, 7)
@@ -333,6 +335,8 @@ struct icm_tr_pkg_driver_ready_response {
u16 reserved2;
};
+#define ICM_TR_FLAGS_RTD3 BIT(6)
+
#define ICM_TR_INFO_SLEVEL_MASK GENMASK(2, 0)
#define ICM_TR_INFO_BOOT_ACL_SHIFT 7
#define ICM_TR_INFO_BOOT_ACL_MASK GENMASK(12, 7)
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 5d94142afda6..693b0353c3fe 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -202,7 +202,7 @@ struct tb_regs_port_header {
/* DWORD 5 */
u32 max_in_hop_id:11;
u32 max_out_hop_id:11;
- u32 __unkown4:10;
+ u32 __unknown4:10;
/* DWORD 6 */
u32 __unknown5;
/* DWORD 7 */
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 8abb4e843085..db8bece63327 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
@@ -1129,6 +1130,14 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->dev.groups = xdomain_attr_groups;
dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
+ /*
+ * This keeps the DMA powered on as long as we have an active
+ * connection to another host.
+ */
+ pm_runtime_set_active(&xd->dev);
+ pm_runtime_get_noresume(&xd->dev);
+ pm_runtime_enable(&xd->dev);
+
return xd;
err_free_local_uuid:
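/*
 * Illustrative sketch, not part of this patch: the "pin runtime PM for
 * the object's lifetime" idiom used above. At creation the device is
 * marked active and a usage reference is taken before enabling, so it
 * can never autosuspend; teardown drops the reference and the state in
 * reverse order.
 */
static void example_pin_rpm(struct device *dev)
{
	pm_runtime_set_active(dev);	/* report the device as powered */
	pm_runtime_get_noresume(dev);	/* hold a usage count permanently */
	pm_runtime_enable(dev);
}

static void example_unpin_rpm(struct device *dev)
{
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
	pm_runtime_set_suspended(dev);
}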
@@ -1174,6 +1183,15 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+ /*
+ * Undo runtime PM here explicitly because it is possible that
+ * the XDomain was never added to the bus and thus device_del()
+ * is not called for it (device_del() would handle this otherwise).
+ */
+ pm_runtime_disable(&xd->dev);
+ pm_runtime_put_noidle(&xd->dev);
+ pm_runtime_set_suspended(&xd->dev);
+
if (!device_is_registered(&xd->dev))
put_device(&xd->dev);
else
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 37caba7c3aff..c8c5cdfc5e19 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/goldfish.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index f6a86f2bc4e5..2a76e22d2ec0 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index d6ae3086c2a2..339befdd2f4d 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/console.h>
#include <linux/serial_core.h>
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 5d421d7e8904..15ad3469660d 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/uio/uio.c
*
@@ -9,8 +10,6 @@
* Userspace IO
*
* Base Functions
- *
- * Licensed under the GPLv2 only.
*/
#include <linux/module.h>
@@ -625,6 +624,12 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
ssize_t retval;
s32 irq_on;
+ if (count != sizeof(s32))
+ return -EINVAL;
+
+ if (copy_from_user(&irq_on, buf, count))
+ return -EFAULT;
+
mutex_lock(&idev->info_lock);
if (!idev->info) {
retval = -EINVAL;
@@ -636,21 +641,11 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
goto out;
}
- if (count != sizeof(s32)) {
- retval = -EINVAL;
- goto out;
- }
-
if (!idev->info->irqcontrol) {
retval = -ENOSYS;
goto out;
}
- if (copy_from_user(&irq_on, buf, count)) {
- retval = -EFAULT;
- goto out;
- }
-
retval = idev->info->irqcontrol(idev->info, irq_on);
out:
@@ -814,7 +809,7 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
out:
mutex_unlock(&idev->info_lock);
- return 0;
+ return ret;
}
static const struct file_operations uio_fops = {
@@ -958,8 +953,6 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_uio_dev_add_attributes;
- info->uio_dev = idev;
-
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
/*
* Note that we deliberately don't use devm_request_irq
@@ -976,6 +969,7 @@ int __uio_register_device(struct module *owner,
goto err_request_irq;
}
+ info->uio_dev = idev;
return 0;
err_request_irq:
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index 30f533ce3758..ab60186f9759 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UIO Hilscher CIF card driver
*
* (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
* Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de>
- *
- * Licensed under GPL version 2 only.
- *
*/
#include <linux/device.h>
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index b55191335d90..bbc17effae5e 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* uio_fsl_elbc_gpcm: UIO driver for eLBC/GPCM peripherals
Copyright (C) 2014 Linutronix GmbH
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index c690d100adcd..e401be8321ab 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uio_hv_generic - generic UIO driver for VMBus
*
* Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
* Copyright (c) 2016, Microsoft Corporation.
*
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* Since the driver does not declare any device ids, you must allocate
* id and bind the device to the driver yourself. For example:
*
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index 4c345db8b016..9ae29ffde410 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UIO driver for Hilscher NetX based fieldbus cards (cifX, comX).
* See http://www.hilscher.com for details.
@@ -5,8 +6,6 @@
* (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
* (C) 2008 Manuel Traut <manut@linutronix.de>
*
- * Licensed under GPL version 2 only.
- *
*/
#include <linux/device.h>
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index a56fdf972dbe..8773e373ffe5 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/* uio_pci_generic - generic UIO driver for PCI 2.3 devices
*
* Copyright (C) 2009 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* Since the driver does not declare any device ids, you must allocate
* id and bind the device to the driver yourself. For example:
*
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 91aea8823af5..1cc175d3c25c 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -122,7 +122,7 @@ static int pruss_probe(struct platform_device *pdev)
struct uio_pruss_dev *gdev;
struct resource *regs_prussio;
struct device *dev = &pdev->dev;
- int ret = -ENODEV, cnt = 0, len;
+ int ret, cnt, i, len;
struct uio_pruss_pdata *pdata = dev_get_platdata(dev);
gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
@@ -131,8 +131,8 @@ static int pruss_probe(struct platform_device *pdev)
gdev->info = kcalloc(MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL);
if (!gdev->info) {
- kfree(gdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_gdev;
}
/* Power on PRU in case its not done as part of boot-loader */
@@ -140,29 +140,26 @@ static int pruss_probe(struct platform_device *pdev)
if (IS_ERR(gdev->pruss_clk)) {
dev_err(dev, "Failed to get clock\n");
ret = PTR_ERR(gdev->pruss_clk);
- kfree(gdev->info);
- kfree(gdev);
- return ret;
- } else {
- ret = clk_enable(gdev->pruss_clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- clk_put(gdev->pruss_clk);
- kfree(gdev->info);
- kfree(gdev);
- return ret;
- }
+ goto err_free_info;
+ }
+
+ ret = clk_enable(gdev->pruss_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ goto err_clk_put;
}
regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs_prussio) {
dev_err(dev, "No PRUSS I/O resource specified\n");
- goto out_free;
+ ret = -EIO;
+ goto err_clk_disable;
}
if (!regs_prussio->start) {
dev_err(dev, "Invalid memory resource\n");
- goto out_free;
+ ret = -EIO;
+ goto err_clk_disable;
}
if (pdata->sram_pool) {
@@ -172,7 +169,8 @@ static int pruss_probe(struct platform_device *pdev)
sram_pool_sz, &gdev->sram_paddr);
if (!gdev->sram_vaddr) {
dev_err(dev, "Could not allocate SRAM pool\n");
- goto out_free;
+ ret = -ENOMEM;
+ goto err_clk_disable;
}
}
@@ -180,14 +178,16 @@ static int pruss_probe(struct platform_device *pdev)
&(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
if (!gdev->ddr_vaddr) {
dev_err(dev, "Could not allocate external memory\n");
- goto out_free;
+ ret = -ENOMEM;
+ goto err_free_sram;
}
len = resource_size(regs_prussio);
gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
if (!gdev->prussio_vaddr) {
dev_err(dev, "Can't remap PRUSS I/O address range\n");
- goto out_free;
+ ret = -ENOMEM;
+ goto err_free_ddr_vaddr;
}
gdev->pintc_base = pdata->pintc_base;
@@ -215,15 +215,36 @@ static int pruss_probe(struct platform_device *pdev)
p->priv = gdev;
ret = uio_register_device(dev, p);
- if (ret < 0)
- goto out_free;
+ if (ret < 0) {
+ kfree(p->name);
+ goto err_unloop;
+ }
}
platform_set_drvdata(pdev, gdev);
return 0;
-out_free:
- pruss_cleanup(dev, gdev);
+err_unloop:
+ for (i = 0, p = gdev->info; i < cnt; i++, p++) {
+ uio_unregister_device(p);
+ kfree(p->name);
+ }
+ iounmap(gdev->prussio_vaddr);
+err_free_ddr_vaddr:
+ dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
+ gdev->ddr_paddr);
+err_free_sram:
+ if (pdata->sram_pool)
+ gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, sram_pool_sz);
+err_clk_disable:
+ clk_disable(gdev->pruss_clk);
+err_clk_put:
+ clk_put(gdev->pruss_clk);
+err_free_info:
+ kfree(gdev->info);
+err_free_gdev:
+ kfree(gdev);
+
return ret;
}
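/*
 * Illustrative sketch, not part of this patch: the error-unwinding
 * shape the pruss_probe() rework above converges on. Each acquired
 * resource gets a label, the labels release resources in exact reverse
 * order of acquisition, and every failure jumps to the first label
 * that still applies. All names below are hypothetical.
 */
static int example_probe(struct device *dev)
{
	int ret;

	ret = acquire_a(dev);
	if (ret)
		return ret;

	ret = acquire_b(dev);
	if (ret)
		goto err_release_a;

	ret = acquire_c(dev);
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b(dev);
err_release_a:
	release_a(dev);
	return ret;
}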
diff --git a/drivers/uio/uio_sercos3.c b/drivers/uio/uio_sercos3.c
index 9cfdfcafa262..9658a0887fee 100644
--- a/drivers/uio/uio_sercos3.c
+++ b/drivers/uio/uio_sercos3.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sercos3: UIO driver for the Automata Sercos III PCI card
Copyright (C) 2008 Linutronix GmbH
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
index f29cf5c6160c..5a321992decc 100644
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/udc/fsl_mxc_udc.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 2fd49b2358f8..403d8cd3e582 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -912,6 +912,9 @@ static struct hv_driver hvfb_drv = {
.id_table = id_table,
.probe = hvfb_probe,
.remove = hvfb_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
};
static int hvfb_pci_stub_probe(struct pci_dev *pdev,
@@ -929,6 +932,9 @@ static struct pci_driver hvfb_pci_stub_driver = {
.id_table = pci_stub_id_table,
.probe = hvfb_pci_stub_probe,
.remove = hvfb_pci_stub_remove,
+ .driver = {
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ }
};
static int __init hvfb_drv_init(void)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
index 80dc47347e21..3079a3df8c37 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 5dd284008630..53bdc256805f 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -970,7 +970,6 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
{
u32 result;
uintptr_t pci_addr;
- int i;
struct ca91cx42_driver *bridge;
struct device *dev;
@@ -978,7 +977,6 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
dev = image->parent->parent;
/* Find the PCI address that maps to the desired VME address */
- i = image->number;
/* Locking as we can only do one of these at a time */
mutex_lock(&bridge->vme_rmw);
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 5b3e017d9276..8b5e598ffdb3 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
/* extra configurations - e.g. 1WS */
-int extra_config;
+static int extra_config;
/**
* Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index c423bdb982bb..0f4ecfcdb549 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -134,8 +134,7 @@
#define EP_DATA_OUT 2
#define EP_DATA_IN 3
-struct ds_device
-{
+struct ds_device {
struct list_head ds_entry;
struct usb_device *udev;
@@ -158,8 +157,7 @@ struct ds_device
struct w1_bus_master master;
};
-struct ds_status
-{
+struct ds_status {
u8 enable;
u8 speed;
u8 pullup_dur;
@@ -236,7 +234,7 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
int i;
pr_info("0x%x: count=%d, status: ", dev->ep[EP_STATUS], count);
- for (i=0; i<count; ++i)
+ for (i = 0; i < count; ++i)
pr_info("%02x ", buf[i]);
pr_info("\n");
@@ -358,7 +356,7 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
int i;
printk("%s: count=%d: ", __func__, count);
- for (i=0; i<count; ++i)
+ for (i = 0; i < count; ++i)
printk("%02x ", buf[i]);
printk("\n");
}
@@ -404,7 +402,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
if (err)
break;
}
- } while(++count < limit);
+ } while (++count < limit);
return err;
}
@@ -447,7 +445,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
if (err >= 0) {
int i;
printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
- for (i=0; i<err; ++i)
+ for (i = 0; i < err; ++i)
printk("%02x ", dev->st_buf[i]);
printk("\n");
}
@@ -613,7 +611,7 @@ static int ds_read_byte(struct ds_device *dev, u8 *byte)
int err;
struct ds_status st;
- err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM , 0xff);
+ err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM, 0xff);
if (err)
return err;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 8851d441e5fd..50b46c4399ea 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/w1.h>
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index e3a78f927f83..f29d1edc5bad 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -7,6 +7,7 @@
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/interrupt.h>
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 6c6594261cb7..ebb85d60b6d5 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index 6ed39dee995f..a3134ffa59f8 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -44,6 +44,7 @@
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index ac5840d9689a..bf6a068245ba 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
index 2c9f53eaff4f..70c9cd3ba938 100644
--- a/drivers/watchdog/max77620_wdt.c
+++ b/drivers/watchdog/max77620_wdt.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mfd/max77620.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2c4a73d1e214..430c3ab84c07 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index ae77112ce97f..cbd752f9ac56 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -29,6 +29,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index b1de8297fa40..d0b53f3c0d17 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>